Diffstat (limited to 'testpar')
39 files changed, 42970 insertions, 15757 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 70e0246..c950b1b 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -1,18 +1,11 @@
-cmake_minimum_required (VERSION 3.1.0)
-PROJECT (HDF5_TEST_PAR)
+cmake_minimum_required (VERSION 3.18)
+project (HDF5_TEST_PAR C)
 
 #-----------------------------------------------------------------------------
-# Apply Definitions to compiler in this directory and below
-#-----------------------------------------------------------------------------
-add_definitions (${HDF_EXTRA_C_FLAGS})
-
-INCLUDE_DIRECTORIES (${HDF5_TEST_SRC_DIR})
-INCLUDE_DIRECTORIES (${HDF5_TOOLS_SRC_DIR}/lib )
-#-----------------------------------------------------------------------------
 # Define Tests
 #-----------------------------------------------------------------------------
-set (testphdf5_SRCS
+set (testphdf5_SOURCES
     ${HDF5_TEST_PAR_SOURCE_DIR}/testphdf5.c
     ${HDF5_TEST_PAR_SOURCE_DIR}/t_dset.c
     ${HDF5_TEST_PAR_SOURCE_DIR}/t_file.c
@@ -24,35 +17,96 @@ set (testphdf5_SRCS
     ${HDF5_TEST_PAR_SOURCE_DIR}/t_chunk_alloc.c
     ${HDF5_TEST_PAR_SOURCE_DIR}/t_filter_read.c
     ${HDF5_TEST_PAR_SOURCE_DIR}/t_prop.c
+    ${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_md_read.c
+    ${HDF5_TEST_PAR_SOURCE_DIR}/t_oflush.c
 )
 
 #-- Adding test for testhdf5
-add_executable (testphdf5 ${testphdf5_SRCS})
-TARGET_NAMING (testphdf5 ${LIB_TYPE})
-TARGET_C_PROPERTIES (testphdf5 ${LIB_TYPE} " " " ")
-target_link_libraries (testphdf5 ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_LIBS})
+add_executable (testphdf5 ${testphdf5_SOURCES})
+target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}")
+target_compile_definitions(testphdf5
+    PRIVATE
+        $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+)
+target_include_directories (testphdf5
+    PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+)
+if (NOT BUILD_SHARED_LIBS)
+  TARGET_C_PROPERTIES (testphdf5 STATIC)
+  target_link_libraries (testphdf5
+      PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+  )
+else ()
+  TARGET_C_PROPERTIES (testphdf5 SHARED)
+  target_link_libraries (testphdf5
+      PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+  )
+endif ()
 set_target_properties (testphdf5 PROPERTIES FOLDER test/par)
 
-MACRO (ADD_H5P_EXE file)
+#-----------------------------------------------------------------------------
+# Add Target to clang-format
+#-----------------------------------------------------------------------------
+if (HDF5_ENABLE_FORMATTERS)
+  clang_format (HDF5_TEST_PAR_testphdf5_FORMAT testphdf5)
+endif ()
+
+macro (ADD_H5P_EXE file)
   add_executable (${file} ${HDF5_TEST_PAR_SOURCE_DIR}/${file}.c)
-  TARGET_NAMING (${file} ${LIB_TYPE})
-  TARGET_C_PROPERTIES (${file} ${LIB_TYPE} " " " ")
-  target_link_libraries (${file} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_LIBS})
+  target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}")
+  target_compile_definitions(${file}
+      PRIVATE
+          $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+  )
+  target_include_directories (${file}
+      PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+  )
+  if (NOT BUILD_SHARED_LIBS)
+    TARGET_C_PROPERTIES (${file} STATIC)
+    target_link_libraries (${file}
+        PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+        $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
+    )
+  else ()
+    TARGET_C_PROPERTIES (${file} SHARED)
+    target_link_libraries (${file}
+        PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+        $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
+    )
+  endif ()
   set_target_properties (${file} PROPERTIES FOLDER test/par)
-ENDMACRO (ADD_H5P_EXE file)
+
+  #-----------------------------------------------------------------------------
+  # Add Target to clang-format
+  #-----------------------------------------------------------------------------
+  if (HDF5_ENABLE_FORMATTERS)
+    clang_format (HDF5_TEST_PAR_${file}_FORMAT ${file})
+  endif ()
+endmacro (ADD_H5P_EXE file)
 
 set (H5P_TESTS
     t_mpi
+    t_bigio
     t_cache
+    t_cache_image
     t_pflush1
     t_pflush2
+    t_pread
     t_pshutdown
     t_prestart
+    t_init_term
+    t_pmulti_dset
     t_shapesame
+    t_filters_parallel
+    t_subfiling_vfd
+    t_2Gio
+    t_vfd
 )
 
-foreach (testp ${H5P_TESTS})
-  ADD_H5P_EXE(${testp})
-endforeach (testp ${H5P_TESTS})
+foreach (h5_testp ${H5P_TESTS})
+  ADD_H5P_EXE(${h5_testp})
+endforeach ()
 
-include (CMakeTests.cmake)
+if (HDF5_TEST_PARALLEL)
+  include (CMakeTests.cmake)
+endif ()
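The rewritten CMakeLists.txt drops the old TARGET_NAMING/${LINK_LIBS} plumbing in favor of per-target commands, and links MPI through the imported MPI::MPI_C target guarded by a generator expression, so nothing MPI-related is added to serial builds. A minimal, self-contained sketch of that pattern (the EXAMPLE_* names are illustrative, not from the patch):

    cmake_minimum_required (VERSION 3.18)
    project (par_example C)

    option (EXAMPLE_ENABLE_PARALLEL "Build against MPI" ON)  # stand-in for HDF5_ENABLE_PARALLEL
    if (EXAMPLE_ENABLE_PARALLEL)
      find_package (MPI REQUIRED COMPONENTS C)               # provides the MPI::MPI_C target
    endif ()

    add_executable (t_example t_example.c)
    # The $<BOOL:...> genex collapses to an empty string when the option
    # is OFF, so the serial build links nothing extra.
    target_link_libraries (t_example
        PRIVATE "$<$<BOOL:${EXAMPLE_ENABLE_PARALLEL}>:MPI::MPI_C>"
    )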
diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake
index 3716ee6..26968de 100644
--- a/testpar/CMakeTests.cmake
+++ b/testpar/CMakeTests.cmake
@@ -1,62 +1,152 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
 ##############################################################################
 ##############################################################################
 ###           T E S T I N G                                                ###
 ##############################################################################
 ##############################################################################
 
+# Remove any output file left over from previous test run
+add_test (
+    NAME MPI_TEST-clear-testphdf5-objects
+    COMMAND ${CMAKE_COMMAND} -E remove ParaTest.h5
+    WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-testphdf5-objects PROPERTIES FIXTURES_SETUP par_clear_testphdf5)
+add_test (
+    NAME MPI_TEST-clean-testphdf5-objects
+    COMMAND ${CMAKE_COMMAND} -E remove ParaTest.h5
+    WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clean-testphdf5-objects PROPERTIES FIXTURES_CLEANUP par_clear_testphdf5)
 
-add_test (NAME TEST_PAR_testphdf5 COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:testphdf5>)
+set (SKIP_tests
+    cchunk1
+    cchunk2
+    cchunk3
+    cchunk4
+    ecdsetw
+    eidsetw2
+    selnone
+    cngrpw-ingrpr
+    cschunkw
+    ccchunkw
+    tldsc
+    actualio
+    MC_coll_MD_read
+)
+set (SKIP_testphdf5 "")
+foreach (skiptest ${SKIP_tests})
+  set (SKIP_testphdf5 "${SKIP_testphdf5};-x;${skiptest}")
+endforeach ()
 
-foreach (testp ${H5P_TESTS})
-  add_test (NAME TEST_PAR_${testp} COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:${testp}>)
-endforeach (testp ${H5P_TESTS})
+add_test (NAME MPI_TEST_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} ${SKIP_testphdf5})
+set_tests_properties (MPI_TEST_testphdf5 PROPERTIES
+    FIXTURES_REQUIRED par_clear_testphdf5
+    ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+    WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+if (last_test)
+  set_tests_properties (MPI_TEST_testphdf5 PROPERTIES DEPENDS ${last_test})
+endif ()
+set (last_test "MPI_TEST_testphdf5")
 
-# The following will only be correct on windows shared
-#set_tests_properties (TEST_PAR_t_pflush1 PROPERTIES WILL_FAIL "true")
-set_property (TEST TEST_PAR_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
-set_tests_properties (TEST_PAR_t_pflush2 PROPERTIES DEPENDS TEST_PAR_t_pflush1)
+#execute the skipped tests
+foreach (skiptest ${SKIP_tests})
+  add_test (NAME MPI_TEST_testphdf5_${skiptest} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} -o ${skiptest})
+  set_tests_properties (MPI_TEST_testphdf5_${skiptest} PROPERTIES
+      FIXTURES_REQUIRED par_clear_testphdf5
+      ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+      WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+  )
+  if (last_test)
+    set_tests_properties (MPI_TEST_testphdf5_${skiptest} PROPERTIES DEPENDS ${last_test})
+  endif ()
+  set (last_test "MPI_TEST_testphdf5_${skiptest}")
+endforeach ()
 
-if (HDF5_TEST_VFD)
+#if (HDF5_OPENMPI_VERSION_SKIP)
+#  list (REMOVE_ITEM H5P_TESTS t_shapesame)
+#endif ()
 
-  set (VFD_LIST
-      sec2
-      stdio
-      core
-      split
-      multi
-      family
-  )
+# do not test until new version is added
+list (REMOVE_ITEM H5P_TESTS t_cache_image)
+
+set (test_par_CLEANFILES
+    t_cache_image_00.h5
+    t_cache_image_01.h5
+    t_cache_image_02.h5
+    flush.h5
+    noflush.h5
+    reloc_t_pread_data_file.h5
+    reloc_t_pread_group_0_file.h5
+    reloc_t_pread_group_1_file.h5
+    shutdown.h5
+    after_mpi_fin.h5
+    #the following should have been removed by the programs
+    bigio_test.h5
+    CacheTestDummy.h5
+    t_filters_parallel.h5
+    MPItest.h5
+    ShapeSameTest.h5
+    test_subfiling_basic_create.h5
+    test_subfiling_config_file.h5
+    test_subfiling_stripe_sizes.h5
+    test_subfiling_read_different_stripe_sizes.h5
+    test_subfiling_precreate_rank_0.h5
+    test_subfiling_write_many_read_one.h5
+    test_subfiling_write_many_read_few.h5
+    test_subfiling_h5fuse.h5
+)
 
-  set (H5P_VFD_TESTS
-      t_pflush1
-      t_pflush2
+# Remove any output file left over from previous test run
+add_test (
+    NAME MPI_TEST-clear-objects
+    COMMAND ${CMAKE_COMMAND} -E remove ${test_par_CLEANFILES}
+    WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-objects PROPERTIES FIXTURES_SETUP par_clear_objects)
+add_test (
+    NAME MPI_TEST-clean-objects
+    COMMAND ${CMAKE_COMMAND} -E remove ${test_par_CLEANFILES}
+    WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clean-objects PROPERTIES FIXTURES_CLEANUP par_clear_objects)
+
+foreach (h5_testp ${H5P_TESTS})
+  add_test (NAME MPI_TEST_${h5_testp} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${h5_testp}> ${MPIEXEC_POSTFLAGS})
+  set_tests_properties (MPI_TEST_${h5_testp} PROPERTIES
+      FIXTURES_REQUIRED par_clear_objects
+      ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+      WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
   )
-
-  if (DIRECT_VFD)
-    set (VFD_LIST ${VFD_LIST} direct)
-  endif (DIRECT_VFD)
-
-  MACRO (ADD_VFD_TEST vfdname resultcode)
-    if (NOT HDF5_ENABLE_USING_MEMCHECKER)
-      foreach (test ${H5P_VFD_TESTS})
-        add_test (
-            NAME TEST_PAR_VFD-${vfdname}-${test}
-            COMMAND "${CMAKE_COMMAND}"
-                -D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
-                -D "TEST_ARGS:STRING="
-                -D "TEST_VFD:STRING=${vfdname}"
-                -D "TEST_EXPECT=${resultcode}"
-                -D "TEST_OUTPUT=${test}"
-                -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
-                -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
-        )
-      endforeach (test ${H5P_VFD_TESTS})
-    endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
-  ENDMACRO (ADD_VFD_TEST)
-
-  # Run test with different Virtual File Driver
-  foreach (vfd ${VFD_LIST})
-    ADD_VFD_TEST (${vfd} 0)
-  endforeach (vfd ${VFD_LIST})
-
-endif (HDF5_TEST_VFD)
+  if (last_test)
+    set_tests_properties (MPI_TEST_${h5_testp} PROPERTIES DEPENDS ${last_test})
+  endif ()
+  set (last_test "MPI_TEST_${h5_testp}")
+endforeach ()
+
+# The t_pflush1 test is hard-coded to fail.
+set_tests_properties (MPI_TEST_t_pflush1 PROPERTIES WILL_FAIL "true")
+#set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+set_tests_properties (MPI_TEST_t_pflush2 PROPERTIES DEPENDS MPI_TEST_t_pflush1)
+set_tests_properties (MPI_TEST_t_prestart PROPERTIES DEPENDS MPI_TEST_t_pshutdown)
+
+##############################################################################
+##############################################################################
+###                         V F D   T E S T S                              ###
+##############################################################################
+##############################################################################
+
+if (HDF5_TEST_VFD)
+  include (CMakeVFDTests.cmake)
+endif ()
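The clear/clean pairs above rely on CTest fixtures rather than fragile test ordering: a test tagged FIXTURES_SETUP runs before, and one tagged FIXTURES_CLEANUP after, every test that names the same fixture in FIXTURES_REQUIRED. A reduced sketch of the mechanism (the demo names are illustrative):

    add_test (NAME demo-clear-objects COMMAND ${CMAKE_COMMAND} -E remove demo.h5)
    set_tests_properties (demo-clear-objects PROPERTIES FIXTURES_SETUP demo_files)

    add_test (NAME demo-clean-objects COMMAND ${CMAKE_COMMAND} -E remove demo.h5)
    set_tests_properties (demo-clean-objects PROPERTIES FIXTURES_CLEANUP demo_files)

    add_test (NAME demo COMMAND $<TARGET_FILE:demo>)
    # CTest now schedules: demo-clear-objects, demo, demo-clean-objects.
    set_tests_properties (demo PROPERTIES FIXTURES_REQUIRED demo_files)

Note also that the launcher line switches from the old ${MPIEXEC} variable to ${MPIEXEC_EXECUTABLE}, the name FindMPI has provided since CMake 3.10.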
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
new file mode 100644
index 0000000..d630015
--- /dev/null
+++ b/testpar/CMakeVFDTests.cmake
@@ -0,0 +1,73 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+###           T E S T I N G                                                ###
+##############################################################################
+##############################################################################
+H5_CREATE_VFD_DIR()
+
+set (H5P_VFD_TESTS
+    t_pflush1
+    t_pflush2
+)
+
+set (H5P_VFD_subfiling_TESTS_SKIP
+    t_pflush1
+    t_pflush2
+)
+
+macro (ADD_VFD_TEST vfdname resultcode)
+  if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+    foreach (h5_test ${H5P_VFD_TESTS})
+      if (NOT "${h5_test}" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+        add_test (
+            NAME MPI_TEST_VFD-${vfdname}-${h5_test}
+            COMMAND "${CMAKE_COMMAND}"
+                -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}"
+                -D "TEST_PROGRAM=$<TARGET_FILE:${h5_test}>"
+                -D "TEST_ARGS:STRING="
+                -D "TEST_VFD:STRING=${vfdname}"
+                -D "TEST_EXPECT=${resultcode}"
+                -D "TEST_OUTPUT=${vfdname}-${h5_test}.out"
+                -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/${vfdname}"
+                -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
+        )
+        set_tests_properties (MPI_TEST_VFD-${vfdname}-${h5_test} PROPERTIES
+            ENVIRONMENT "srcdir=${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}"
+            WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
+        )
+      endif ()
+    endforeach ()
+    if (NOT "t_pflush1" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+      set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTIES WILL_FAIL "true")
+      #set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+    endif ()
+    if (NOT "t_pflush2" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+      if (NOT "t_pflush1" IN_LIST H5P_VFD_${vfdname}_TESTS_SKIP)
+        set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-t_pflush1)
+      endif ()
+    endif ()
+  endif ()
+endmacro ()
+
+##############################################################################
+##############################################################################
+###                         T H E   T E S T S                              ###
+##############################################################################
+##############################################################################
+
+# Run test with different Virtual File Driver
+foreach (h5_vfd ${VFD_LIST})
+  ADD_VFD_TEST (${h5_vfd} 0)
+endforeach ()
diff --git a/testpar/COPYING b/testpar/COPYING
deleted file mode 100644
index 6903daf..0000000
--- a/testpar/COPYING
+++ /dev/null
@@ -1,16 +0,0 @@
-
-   Copyright by The HDF Group and
-   The Board of Trustees of the University of Illinois.
-   All rights reserved.
-
-   The files and subdirectories in this directory are part of HDF5.
-   The full HDF5 copyright notice, including terms governing use,
-   modification, and redistribution, is contained in the files COPYING
-   and Copyright.html.  COPYING can be found at the root of the source
-   code distribution tree; Copyright.html can be found at the root
-   level of an installed copy of the electronic HDF5 document set and
-   is linked from the top-level documents page.  It can also be found
-   at http://www.hdfgroup.org/HDF5/doc/Copyright.html.  If you do not
-   have access to either file, you may request a copy from
-   help@hdfgroup.org.
-
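The new CMakeVFDTests.cmake keys per-driver skips off list variables named H5P_VFD_<vfdname>_TESTS_SKIP and checks membership with the IN_LIST operator; a skip list that was never defined simply matches nothing. A reduced sketch of that lookup (driver and test names here are illustrative):

    set (VFD_LIST sec2 subfiling)
    set (H5P_VFD_subfiling_TESTS_SKIP t_pflush1 t_pflush2)

    foreach (h5_vfd ${VFD_LIST})
      foreach (h5_test t_pflush1 t_pflush2 t_other)
        # H5P_VFD_sec2_TESTS_SKIP is undefined, so nothing is skipped for sec2.
        if (NOT "${h5_test}" IN_LIST H5P_VFD_${h5_vfd}_TESTS_SKIP)
          message (STATUS "would add MPI_TEST_VFD-${h5_vfd}-${h5_test}")
        endif ()
      endforeach ()
    endforeach ()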
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 1eae439..0506961 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -1,16 +1,13 @@
 #
 # Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
 # All rights reserved.
 #
 # This file is part of HDF5.  The full HDF5 copyright notice, including
 # terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html.  COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page.  It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html.  If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
 ##
 ## Makefile.am
 ## Run automake to generate a Makefile.in from this file.
@@ -23,15 +20,31 @@ include $(top_srcdir)/config/commence.am
 
 AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
 
+if SUBFILING_VFD_CONDITIONAL
+  AM_CPPFLAGS += -I$(top_srcdir)/src/H5FDsubfiling
+endif
+
+# Test scripts--
+#       testpflush.sh:
+TEST_SCRIPT_PARA = testpflush.sh
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+
+check_SCRIPTS = $(TEST_SCRIPT_PARA)
+
 # Test programs.  These are our main targets.
 #
-TEST_PROG_PARA=t_mpi testphdf5 t_cache t_pflush1 t_pflush2 t_pshutdown t_prestart t_shapesame
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_pmulti_dset t_shapesame t_filters_parallel t_2Gio t_vfd
+
+if SUBFILING_VFD_CONDITIONAL
+  TEST_PROG_PARA += t_subfiling_vfd
+endif
 
-check_PROGRAMS = $(TEST_PROG_PARA)
+# t_pflush1 and t_pflush2 are used by testpflush.sh
+check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
 
 testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
            t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
-           t_prop.c
+           t_prop.c t_coll_md_read.c t_oflush.c
 
 # The tests all depend on the hdf5 library and the test library
 LDADD = $(LIBH5TEST) $(LIBHDF5)
@@ -39,8 +52,12 @@ LDADD = $(LIBH5TEST) $(LIBHDF5)
 # Temporary files
 # MPItest.h5 is from t_mpi
 # Para*.h5 are from testphdf
+# bigio_test.h5 is from t_bigio
+# ShapeSameTest.h5 is from t_shapesame
 # shutdown.h5 is from t_pshutdown
+# after_mpi_fin.h5 is from t_init_term
 # go is used for debugging. See testphdf5.c.
-CHECK_CLEANFILES+=MPItest.h5 Para*.h5 CacheTestDummy.h5 shutdown.h5 go
+CHECK_CLEANFILES+=MPItest.h5 Para*.h5 bigio_test.h5 CacheTestDummy.h5 \
+        ShapeSameTest.h5 shutdown.h5 pmulti_dset.h5 after_mpi_fin.h5 go
 
 include $(top_srcdir)/config/conclude.am
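On the Autotools side, the subfiling VFD pieces are guarded by an Automake conditional. The Makefile.am fragment above only consumes SUBFILING_VFD_CONDITIONAL; a sketch of how such a conditional is typically defined in configure.ac (the shell variable below is illustrative, only the conditional name comes from the patch):

    # configure.ac (illustrative): define the conditional Makefile.am tests
    AM_CONDITIONAL([SUBFILING_VFD_CONDITIONAL], [test "X$enable_subfiling_vfd" = "Xyes"])

    # Makefile.am: extend flags and the parallel test list only when enabled
    if SUBFILING_VFD_CONDITIONAL
      AM_CPPFLAGS += -I$(top_srcdir)/src/H5FDsubfiling
      TEST_PROG_PARA += t_subfiling_vfd
    endif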
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
deleted file mode 100644
index 90315cc..0000000
--- a/testpar/Makefile.in
+++ /dev/null
@@ -1,1418 +0,0 @@
-# Makefile.in generated by automake 1.14.1 from Makefile.am.
-# @configure_input@
-[... remaining 1,416 lines of automake-generated boilerplate deleted ...]
pdf \ - pdf-am ps ps-am recheck tags tags-am uninstall uninstall-am - - -# List all build rules defined by HDF5 Makefiles as "PHONY" targets here. -# This tells the Makefiles that these targets are not files to be built but -# commands that should be executed even if a file with the same name already -# exists. -.PHONY: build-check-clean build-check-p build-check-s build-lib build-progs \ - build-tests check-clean check-install check-p check-s check-vfd \ - install-doc lib progs tests uninstall-doc _exec_check-s _test help - -help: - @$(top_srcdir)/bin/makehelp - -# lib/progs/tests targets recurse into subdirectories. build-* targets -# build files in this directory. -build-lib: $(LIB) -build-progs: $(LIB) $(PROGS) -build-tests: $(LIB) $(PROGS) $(chk_TESTS) - -# General rule for recursive building targets. -# BUILT_SOURCES contain targets that need to be built before anything else -# in the directory (e.g., for Fortran type detection) -lib progs tests check-s check-p :: $(BUILT_SOURCES) - @$(MAKE) $(AM_MAKEFLAGS) build-$@ || exit 1; - @for d in X $(SUBDIRS); do \ - if test $$d != X && test $$d != .; then \ - (set -x; cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; \ - done - -# General rule for recursive cleaning targets. Like the rule above, -# but doesn't require building BUILT_SOURCES. -check-clean :: - @$(MAKE) $(AM_MAKEFLAGS) build-$@ || exit 1; - @for d in X $(SUBDIRS); do \ - if test $$d != X && test $$d != .; then \ - (set -x; cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \ - fi; \ - done - -# Tell Automake to build tests when the user types `make all' (this is -# not its default behavior). Also build EXTRA_LIB and EXTRA_PROG since -# Automake won't build them automatically, either. -all-local: $(EXTRA_LIB) $(EXTRA_PROG) $(chk_TESTS) - -# make install-doc doesn't do anything outside of doc directory, but -# Makefiles should recognize it. -# UPDATE: docs no longer reside in this build tree, so this target -# is depreciated. -install-doc uninstall-doc: - @echo "Nothing to be done." - -# clean up files generated by tests so they can be re-run. -build-check-clean: - $(RM) -rf $(CHECK_CLEANFILES) - -# run check-clean whenever mostlyclean is run -mostlyclean-local: build-check-clean - -# check-install is just a synonym for installcheck -check-install: installcheck - -# Run each test in order, passing $(TEST_FLAGS) to the program. -# Since tests are done in a shell loop, "make -i" does apply inside it. -# Set HDF5_Make_Ignore to a non-blank string to ignore errors inside the loop. -# The timestamps give a rough idea how much time the tests use. -# -# Note that targets in chk_TESTS (defined above) will be built when the user -# types 'make tests' or 'make check', but only programs in TEST_PROG, -# TEST_PROG_PARA, or TEST_SCRIPT will actually be executed. -check-TESTS: test - -test _test: - @$(MAKE) build-check-s - @$(MAKE) build-check-p - -# Actual execution of check-s. -build-check-s: $(LIB) $(PROGS) $(chk_TESTS) - @if test -n "$(TEST_PROG)$(TEST_SCRIPT)"; then \ - echo "===Serial tests in `echo ${PWD} | sed -e s:.*/::` begin `date`==="; \ - fi - @$(MAKE) $(AM_MAKEFLAGS) _exec_check-s - @if test -n "$(TEST_PROG)$(TEST_SCRIPT)"; then \ - echo "===Serial tests in `echo ${PWD} | sed -e s:.*/::` ended `date`===";\ - fi - -_exec_check-s: $(TEST_PROG_CHKEXE) $(TEST_SCRIPT_CHKSH) - -# The dummy.chkexe here prevents the target from being -# empty if there are no tests in the current directory. -# $${log} is the log file. -# $${tname} is the name of test. 
-$(TEST_PROG_CHKEXE) $(TEST_PROG_PARA_CHKEXE) dummy.chkexe_: - @if test "X$@" != "X.chkexe_" && test "X$@" != "Xdummy.chkexe_"; then \ - tname=$(@:.chkexe_=)$(EXEEXT);\ - log=$(@:.chkexe_=.chklog); \ - echo "============================"; \ - if $(top_srcdir)/bin/newer $(@:.chkexe_=.chkexe) $${tname}; then \ - echo "No need to test $${tname} again."; \ - else \ - echo "============================" > $${log}; \ - if test "X$(FORTRAN_API)" = "Xyes"; then \ - echo "Fortran API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \ - echo "Fortran API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \ - elif test "X$(CXX_API)" = "Xyes"; then \ - echo "C++ API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \ - echo "C++ API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log};\ - else \ - echo "Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \ - echo "$(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \ - fi; \ - echo "============================" >> $${log}; \ - srcdir="$(srcdir)" \ - $(TIME) $(RUNEXEC) ./$${tname} $(TEST_FLAGS) >> $${log} 2>&1 \ - && touch $(@:.chkexe_=.chkexe) || \ - (test $$HDF5_Make_Ignore && echo "*** Error ignored") || \ - (cat $${log} && false) || exit 1; \ - echo "" >> $${log}; \ - echo "Finished testing $${tname} $(TEST_FLAGS)" >> $${log}; \ - echo "============================" >> $${log}; \ - echo "Finished testing $${tname} $(TEST_FLAGS)"; \ - cat $${log}; \ - fi; \ - fi - -# The dummysh.chkexe here prevents the target from being -# empty if there are no tests in the current directory. -# $${log} is the log file. -# $${tname} is the name of test. -$(TEST_SCRIPT_CHKSH) $(TEST_SCRIPT_PARA_CHKSH) dummysh.chkexe_: - @if test "X$@" != "X.chkexe_" && test "X$@" != "Xdummysh.chkexe_"; then \ - cmd=$(@:.chkexe_=);\ - tname=`basename $$cmd`;\ - chkname=`basename $(@:.chkexe_=.chkexe)`;\ - log=`basename $(@:.chkexe_=.chklog)`; \ - echo "============================"; \ - if $(top_srcdir)/bin/newer $${chkname} $$cmd $(SCRIPT_DEPEND); then \ - echo "No need to test $${tname} again."; \ - else \ - echo "============================" > $${log}; \ - if test "X$(FORTRAN_API)" = "Xyes"; then \ - echo "Fortran API: Testing $${tname} $(TEST_FLAGS)"; \ - echo "Fortran API: $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \ - elif test "X$(CXX_API)" = "Xyes"; then \ - echo "C++ API: Testing $${tname} $(TEST_FLAGS)"; \ - echo "C++ API: $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \ - else \ - echo "Testing $${tname} $(TEST_FLAGS)"; \ - echo "$${tname} $(TEST_FLAGS) Test Log" >> $${log}; \ - fi; \ - echo "============================" >> $${log}; \ - RUNSERIAL="$(RUNSERIAL)" RUNPARALLEL="$(RUNPARALLEL)" \ - srcdir="$(srcdir)" \ - $(TIME) $(SHELL) $$cmd $(TEST_FLAGS) >> $${log} 2>&1 \ - && touch $${chkname} || \ - (test $$HDF5_Make_Ignore && echo "*** Error ignored") || \ - (cat $${log} && false) || exit 1; \ - echo "" >> $${log}; \ - echo "Finished testing $${tname} $(TEST_FLAGS)" >> $${log}; \ - echo "============================" >> $${log}; \ - echo "Finished testing $${tname} $(TEST_FLAGS)"; \ - cat $${log}; \ - fi; \ - echo "============================"; \ - fi - -# Actual execution of check-p. 
-build-check-p: $(LIB) $(PROGS) $(chk_TESTS) - @if test -n "$(TEST_PROG_PARA)$(TEST_SCRIPT_PARA)"; then \ - echo "===Parallel tests in `echo ${PWD} | sed -e s:.*/::` begin `date`==="; \ - fi - @if test -n "$(TEST_PROG_PARA)"; then \ - echo "**** Hint ****"; \ - echo "Parallel test files reside in the current directory" \ - "by default."; \ - echo "Set HDF5_PARAPREFIX to use another directory. E.g.,"; \ - echo " HDF5_PARAPREFIX=/PFS/user/me"; \ - echo " export HDF5_PARAPREFIX"; \ - echo " make check"; \ - echo "**** end of Hint ****"; \ - fi - @for test in $(TEST_PROG_PARA) dummy; do \ - if test $$test != dummy; then \ - $(MAKE) $(AM_MAKEFLAGS) $$test.chkexe_ \ - RUNEXEC="$(RUNPARALLEL)" || exit 1; \ - fi; \ - done - @for test in $(TEST_SCRIPT_PARA) dummy; do \ - if test $$test != dummy; then \ - $(MAKE) $(AM_MAKEFLAGS) $$test.chkexe_ || exit 1; \ - fi; \ - done - @if test -n "$(TEST_PROG_PARA)$(TEST_SCRIPT_PARA)"; then \ - echo "===Parallel tests in `echo ${PWD} | sed -e s:.*/::` ended `date`===";\ - fi - -# Run test with different Virtual File Driver -check-vfd: $(LIB) $(PROGS) $(chk_TESTS) - @for vfd in $(VFD_LIST) dummy; do \ - if test $$vfd != dummy; then \ - echo "============================"; \ - echo "Testing Virtual File Driver $$vfd"; \ - echo "============================"; \ - $(MAKE) $(AM_MAKEFLAGS) check-clean || exit 1; \ - HDF5_DRIVER=$$vfd $(MAKE) $(AM_MAKEFLAGS) check || exit 1; \ - fi; \ - done - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c new file mode 100644 index 0000000..d62fb55 --- /dev/null +++ b/testpar/t_2Gio.c @@ -0,0 +1,4692 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Parallel tests for datasets + */ + +/* + * Example of using the parallel HDF5 library to access datasets. + * + * This program contains three major parts. Part 1 tests fixed dimension + * datasets, for both independent and collective transfer modes. + * Part 2 tests extendible datasets, for independent transfer mode + * only. + * Part 3 tests extendible datasets, for collective transfer mode + * only. + */ + +#include <stdio.h> +#include "hdf5.h" +#include "testphdf5.h" + +#include "mpi.h" + +/* For this test, we don't want to inherit the RANK definition + * from testphdf5.h. We'll define MAX_RANK to accommodate 3D arrays + * and use that definition rather than RANK. + */ +#ifndef MAX_RANK +#define MAX_RANK 2 +#endif + +/* As with RANK vs MAX_RANK, we use BIG_X_FACTOR vs ROW_FACTOR + * and BIG_Y_FACTOR vs COL_FACTOR. We introduce BIG_Z_FACTOR + * for the 3rd dimension. 
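+ *
+ * (Note: MAX_RANK is defined as 2 below, so the fixed-dimension tests in
+ * this file are 2-D; the 3-D MpioTest2G test further down builds its
+ * rank-3 dataspaces with a literal 3 rather than MAX_RANK.)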
+ */ + +#ifndef BIG_X_FACTOR +#define BIG_X_FACTOR 1048576 +#endif +#ifndef BIG_Y_FACTOR +#define BIG_Y_FACTOR 32 +#endif +#ifndef BIG_Z_FACTOR +#define BIG_Z_FACTOR 2048 +#endif + +#ifndef PATH_MAX +#define PATH_MAX 512 +#endif /* !PATH_MAX */ + +/* global variables */ +int dim0; +int dim1; +int dim2; +int chunkdim0; +int chunkdim1; +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ +int dxfer_coll_type = DXFER_COLLECTIVE_IO; + +H5E_auto2_t old_func; /* previous error handler */ +void *old_client_data; /* previous error handler arg.*/ + +#define NFILENAME 3 +#define PARATESTFILE filenames[0] +const char *FILENAME[NFILENAME] = {"ParaTest", "Hugefile", NULL}; +char *filenames[NFILENAME]; +hid_t fapl; /* file access property list */ +MPI_Comm test_comm = MPI_COMM_WORLD; + +// static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */ +// static const char *TestProgName = NULL; +// static void (*TestPrivateUsage)(void) = NULL; +// static int (*TestPrivateParser)(int ac, char *av[]) = NULL; + +/* + * The following are various utility routines used by the tests. + */ + +/* + * Show command usage + */ +static void +usage(void) +{ + HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " + "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); + HDprintf("\t-m<n_datasets>" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n<n_groups>" + "\tset number of groups for the multiple group test\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", BIG_X_FACTOR, + BIG_Y_FACTOR); + HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); +} + +/* + * parse the command line options + */ +static int +parse_options(int argc, char **argv) +{ + int mpi_size, mpi_rank; /* mpi variables */ + + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* setup default chunk-size. Make sure sizes are > 0 */ + + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'm': + ndatasets = atoi((*argv + 1) + 1); + if (ndatasets < 0) { + nerrors++; + return (1); + } + break; + case 'n': + ngroups = atoi((*argv + 1) + 1); + if (ngroups < 0) { + nerrors++; + return (1); + } + break; + case 'f': + if (--argc < 1) { + nerrors++; + return (1); + } + if (**(++argv) == '-') { + nerrors++; + return (1); + } + paraprefix = *argv; + break; + case 'i': /* Collective MPI-IO access with independent IO */ + dxfer_coll_type = DXFER_INDEPENDENT_IO; + break; + case '2': /* Use the split-file driver with MPIO access */ + /* Can use $HDF5_METAPREFIX to define the */ + /* meta-file-prefix. 
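+                     * For example (illustrative invocation; the prefix
+                     * value and program name are hypothetical):
+                     *     HDF5_METAPREFIX=/scratch/meta mpirun -np 4 ./t_2Gio -2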
*/
+                    facc_type = FACC_MPIO | FACC_SPLIT;
+                    break;
+                case 'd': /* dimension sizes */
+                    if (--argc < 2) {
+                        nerrors++;
+                        return (1);
+                    }
+                    dim0 = atoi(*(++argv)) * mpi_size;
+                    argc--;
+                    dim1 = atoi(*(++argv)) * mpi_size;
+                    /* set default chunkdim sizes too */
+                    chunkdim0 = (dim0 + 9) / 10;
+                    chunkdim1 = (dim1 + 9) / 10;
+                    break;
+                case 'c': /* chunk dimensions */
+                    if (--argc < 2) {
+                        nerrors++;
+                        return (1);
+                    }
+                    chunkdim0 = atoi(*(++argv));
+                    argc--;
+                    chunkdim1 = atoi(*(++argv));
+                    break;
+                case 'h': /* print help message--return with nerrors set */
+                    return (1);
+                default:
+                    HDprintf("Illegal option(%s)\n", *argv);
+                    nerrors++;
+                    return (1);
+            }
+        }
+    } /*while*/
+
+    /* check validity of dimension and chunk sizes */
+    if (dim0 <= 0 || dim1 <= 0) {
+        HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+        nerrors++;
+        return (1);
+    }
+    if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+        HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+        nerrors++;
+        return (1);
+    }
+
+    /* Make sure datasets can be divided into equal portions by the processes */
+    if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+        if (MAINPROCESS)
+            HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+        nerrors++;
+        return (1);
+    }
+
+    /* compose the test filenames */
+    {
+        int i, n;
+
+        n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+        for (i = 0; i < n; i++)
+            if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
+                HDprintf("h5_fixname failed\n");
+                nerrors++;
+                return (1);
+            }
+
+        if (MAINPROCESS) {
+            HDprintf("Test filenames are:\n");
+            for (i = 0; i < n; i++)
+                HDprintf("    %s\n", filenames[i]);
+        }
+    }
+
+    return (0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+    hid_t  ret_pl = -1;
+    herr_t ret;      /* generic return value */
+    int    mpi_rank; /* mpi variables */
+
+    /* need the rank for error checking macros */
+    MPI_Comm_rank(test_comm, &mpi_rank);
+
+    ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+    VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+    if (l_facc_type == FACC_DEFAULT)
+        return (ret_pl);
+
+    if (l_facc_type == FACC_MPIO) {
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+        VRFY((ret >= 0), "");
+        ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+        VRFY((ret >= 0), "");
+        ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+        VRFY((ret >= 0), "");
+        return (ret_pl);
+    }
+
+    if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+        hid_t mpio_pl;
+
+        mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY((mpio_pl >= 0), "");
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+        VRFY((ret >= 0), "");
+
+        /* setup file access template */
+        ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY((ret_pl >= 0), "");
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+        VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+        H5Pclose(mpio_pl);
+        return (ret_pl);
+    }
+
+    /* unknown file access types */
+    return (ret_pl);
+}
+
+/*
+ * Set up the dimensions of the hyperslab.
+ * Four modes--by rows or by columns, each with a zero-size variant for process 0.
+ * Assume dimension rank is 2.
+ * BYROW  divide into slabs of rows
+ * BYCOL  divide into blocks of columns
+ * ZROW   same as BYROW except process 0 gets 0 rows
+ * ZCOL   same as BYCOL except process 0 gets 0 columns
+ */
+static void
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+         int mode)
+{
+    switch (mode) {
+        case BYROW:
+            /* Each process takes a slab of rows. */
+            block[0]  = (hsize_t)dim0 / (hsize_t)mpi_size;
+            block[1]  = (hsize_t)dim1;
+            stride[0] = block[0];
+            stride[1] = block[1];
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = (hsize_t)mpi_rank * block[0];
+            start[1]  = 0;
+            if (VERBOSE_MED)
+                HDprintf("slab_set BYROW\n");
+            break;
+        case BYCOL:
+            /* Each process takes a block of columns. */
+            block[0]  = (hsize_t)dim0;
+            block[1]  = (hsize_t)dim1 / (hsize_t)mpi_size;
+            stride[0] = block[0];
+            stride[1] = block[1];
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = 0;
+            start[1]  = (hsize_t)mpi_rank * block[1];
+            if (VERBOSE_MED)
+                HDprintf("slab_set BYCOL\n");
+            break;
+        case ZROW:
+            /* Similar to BYROW except process 0 gets 0 rows */
+            block[0]  = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+            block[1]  = (hsize_t)dim1;
+            stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+            stride[1] = block[1];
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+            start[1]  = 0;
+            if (VERBOSE_MED)
+                HDprintf("slab_set ZROW\n");
+            break;
+        case ZCOL:
+            /* Similar to BYCOL except process 0 gets 0 columns */
+            block[0]  = (hsize_t)dim0;
+            block[1]  = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+            stride[0] = block[0];
+            stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = 0;
+            start[1]  = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+            if (VERBOSE_MED)
+                HDprintf("slab_set ZCOL\n");
+            break;
+        default:
+            /* Unknown mode. Set it to cover the whole dataset. */
+            HDprintf("unknown slab_set mode (%d)\n", mode);
+            block[0]  = (hsize_t)dim0;
+            block[1]  = (hsize_t)dim1;
+            stride[0] = block[0];
+            stride[1] = block[1];
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = 0;
+            start[1]  = 0;
+            if (VERBOSE_MED)
+                HDprintf("slab_set wholeset\n");
+            break;
+    }
+    if (VERBOSE_MED) {
+        HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+                 "datapoints=%lu\n",
+                 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+                 (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+                 (unsigned long)block[0], (unsigned long)block[1],
+                 (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+    }
+}
+
+/*
+ * Set up the coordinates for point selection.
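+ *
+ * The coords array holds interleaved (row, col) pairs, MAX_RANK values
+ * per point.  A minimal sketch for three points along one row r (values
+ * illustrative only):
+ *
+ *     coords[] = { r, c0,   r, c1,   r, c2 };
+ *
+ * IN_ORDER fills the pairs from the front of the array; OUT_OF_ORDER
+ * fills them from the back, so the points end up listed in reverse.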
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+          hsize_t coords[], int order)
+{
+    hsize_t i, j, k = 0, m, n, s1, s2;
+
+    // HDcompile_assert(MAX_RANK == 3);
+    HDcompile_assert(MAX_RANK == 2);
+
+    if (OUT_OF_ORDER == order)
+        k = (num_points * MAX_RANK) - 1;
+    else if (IN_ORDER == order)
+        k = 0;
+
+    s1 = start[0];
+    s2 = start[1];
+
+    for (i = 0; i < count[0]; i++)
+        for (j = 0; j < count[1]; j++)
+            for (m = 0; m < block[0]; m++)
+                for (n = 0; n < block[1]; n++)
+                    if (OUT_OF_ORDER == order) {
+                        coords[k--] = s2 + (stride[1] * j) + n;
+                        coords[k--] = s1 + (stride[0] * i) + m;
+                    }
+                    else if (IN_ORDER == order) {
+                        coords[k++] = s1 + stride[0] * i + m;
+                        coords[k++] = s2 + stride[1] * j + n;
+                    }
+
+    if (VERBOSE_MED) {
+        HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+                 "datapoints=%lu\n",
+                 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+                 (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+                 (unsigned long)block[0], (unsigned long)block[1],
+                 (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+        k = 0;
+        for (i = 0; i < num_points; i++) {
+            HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+            k += 2;
+        }
+    }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+    DATATYPE *dataptr = dataset;
+    hsize_t   i, j;
+
+    /* put some trivial data in the data_array */
+    for (i = 0; i < block[0]; i++) {
+        for (j = 0; j < block[1]; j++) {
+            *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+            dataptr++;
+        }
+    }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+    DATATYPE *dataptr = dataset;
+    hsize_t   i, j;
+
+    /* print the column heading */
+    HDprintf("%-8s", "Cols:");
+    for (j = 0; j < block[1]; j++) {
+        HDprintf("%3lu ", (unsigned long)(start[1] + j));
+    }
+    HDprintf("\n");
+
+    /* print the slab data */
+    for (i = 0; i < block[0]; i++) {
+        HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+        for (j = 0; j < block[1]; j++) {
+            HDprintf("%03d ", *dataptr++);
+        }
+        HDprintf("\n");
+    }
+}
+
+/*
+ * Compare the content of the dataset with the expected values and
+ * report (up to MAX_ERR_REPORT) any mismatches; returns the error count.
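+ *
+ * Typical use, as in the read tests below:
+ *
+ *     ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ *     if (ret)
+ *         nerrors++;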
+ */
+int
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+             DATATYPE *original)
+{
+    hsize_t i, j;
+    int     vrfyerrs;
+
+    /* print it if VERBOSE_MED */
+    if (VERBOSE_MED) {
+        HDprintf("dataset_vrfy dumping:::\n");
+        HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+                 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+                 (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+                 (unsigned long)block[0], (unsigned long)block[1]);
+        HDprintf("original values:\n");
+        dataset_print(start, block, original);
+        HDprintf("compared values:\n");
+        dataset_print(start, block, dataset);
+    }
+
+    vrfyerrs = 0;
+    for (i = 0; i < block[0]; i++) {
+        for (j = 0; j < block[1]; j++) {
+            if (*dataset != *original) {
+                if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+                    HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+                             (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+                             (unsigned long)(j + start[1]), *(original), *(dataset));
+                }
+            }
+            /* advance over every element, matching or not */
+            dataset++;
+            original++;
+        }
+    }
+    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+        HDprintf("[more errors ...]\n");
+    if (vrfyerrs)
+        HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+    return (vrfyerrs);
+}
+
+/* NOTE: This is a memory-intensive test and is only run
+ * with 2 MPI ranks and with a testing express level
+ * of 0, i.e. when an exhaustive test run is allowed. Otherwise
+ * the test is skipped.
+ *
+ * Thanks to l.ferraro@cineca.it for the following test:
+ *
+ * This is a simple test case to reproduce a problem
+ * occurring on the LUSTRE filesystem with the creation
+ * of a 4GB dataset using chunking with parallel HDF5.
+ * The test works correctly if chunking is disabled, or
+ * when the number of bytes assigned to each process is
+ * less than 4GB. If it is equal or more, the write either
+ * hangs or results in a PMPI_Waitall error.
+ *
+ * $> mpirun -genv I_MPI_EXTRA_FILESYSTEM on
+ *      -genv I_MPI_EXTRA_FILESYSTEM_LIST gpfs
+ *      -n 1 ./h5_mpi_big_dataset.x 1024 1024 1024
+ */
+
+#define H5FILE_NAME "hugefile.h5"
+#define DATASETNAME "dataset"
+
+static int
+MpioTest2G(MPI_Comm comm)
+{
+    /*
+     * HDF5 APIs definitions
+     */
+    herr_t status;
+    hid_t  file_id, dset_id; /* file and dataset identifiers */
+    hid_t  plist_id;         /* property list identifier */
+    hid_t  filespace;        /* file dataspace identifier */
+    int   *data;             /* pointer to data buffer to write */
+    size_t tot_size_bytes;
+    hid_t  dcpl_id;
+    hid_t  memorydataspace;
+    hid_t  filedataspace;
+    size_t slice_per_process;
+    size_t data_size;
+    size_t data_size_bytes;
+
+    hsize_t chunk[3];
+    hsize_t h5_counts[3];
+    hsize_t h5_offsets[3];
+    hsize_t shape[3] = {1024, 1024, 1152};
+
+    /*
+     * MPI variables
+     */
+    int      mpi_size, mpi_rank;
+    MPI_Info info = MPI_INFO_NULL;
+
+    MPI_Comm_size(comm, &mpi_size);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    if (mpi_rank == 0) {
+        HDprintf("Using %d processes on dataset shape "
+                 "[%" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE "]\n",
+                 mpi_size, shape[0], shape[1], shape[2]);
+    }
+
+    /*
+     * Set up file access property list with parallel I/O access
+     */
+    plist_id = H5Pcreate(H5P_FILE_ACCESS);
+    VRFY((plist_id >= 0), "H5Pcreate file_access succeeded");
+    status = H5Pset_fapl_mpio(plist_id, comm, info);
+    VRFY((status >= 0), "H5Pset_fapl_mpio succeeded");
+
+    /*
+     * Create a new file collectively and release property list identifier.
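+     * (This is a collective operation: every rank in `comm` must call
+     * H5Fcreate with identical arguments.)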
+     */
+    file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+    VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+    H5Pclose(plist_id);
+
+    /*
+     * Create the dataspace for the dataset.
+     */
+    tot_size_bytes = sizeof(int);
+    for (int i = 0; i < 3; i++) {
+        tot_size_bytes *= shape[i];
+    }
+    if (mpi_rank == 0) {
+        HDprintf("Dataset of %zu bytes\n", tot_size_bytes);
+    }
+    filespace = H5Screate_simple(3, shape, NULL);
+    VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+    /*
+     * Select chunking
+     */
+    dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+    VRFY((dcpl_id >= 0), "H5P_DATASET_CREATE");
+    chunk[0] = 4;
+    chunk[1] = shape[1];
+    chunk[2] = shape[2];
+    status   = H5Pset_chunk(dcpl_id, 3, chunk);
+    VRFY((status >= 0), "H5Pset_chunk succeeded");
+
+    /*
+     * Create the dataset with default properties and close filespace.
+     */
+    dset_id = H5Dcreate2(file_id, DATASETNAME, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+    H5Sclose(filespace);
+
+    /*
+     * Create property list for collective dataset write.
+     */
+    plist_id = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((plist_id >= 0), "H5P_DATASET_XFER");
+    status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+    VRFY((status >= 0), "");
+
+    H5_CHECKED_ASSIGN(slice_per_process, size_t, (shape[0] + (hsize_t)mpi_size - 1) / (hsize_t)mpi_size,
+                      hsize_t);
+    data_size       = slice_per_process * shape[1] * shape[2];
+    data_size_bytes = sizeof(int) * data_size;
+    data            = HDmalloc(data_size_bytes);
+    VRFY((data != NULL), "data HDmalloc succeeded");
+
+    for (size_t i = 0; i < data_size; i++) {
+        data[i] = mpi_rank;
+    }
+
+    h5_counts[0]  = slice_per_process;
+    h5_counts[1]  = shape[1];
+    h5_counts[2]  = shape[2];
+    h5_offsets[0] = (size_t)mpi_rank * slice_per_process;
+    h5_offsets[1] = 0;
+    h5_offsets[2] = 0;
+    filedataspace = H5Screate_simple(3, shape, NULL);
+    VRFY((filedataspace >= 0), "H5Screate_simple succeeded");
+
+    /* trim the remainder along the first dimension so the last rank's
+     * selection stays within shape[0] */
+    if (h5_offsets[0] + h5_counts[0] > shape[0]) {
+        h5_counts[0] = shape[0] - h5_offsets[0];
+    }
+
+    status = H5Sselect_hyperslab(filedataspace, H5S_SELECT_SET, h5_offsets, NULL, h5_counts, NULL);
+    VRFY((status >= 0), "H5Sselect_hyperslab succeeded");
+
+    memorydataspace = H5Screate_simple(3, h5_counts, NULL);
+    VRFY((memorydataspace >= 0), "H5Screate_simple succeeded");
+
+    status = H5Dwrite(dset_id, H5T_NATIVE_INT, memorydataspace, filedataspace, plist_id, data);
+    VRFY((status >= 0), "H5Dwrite succeeded");
+    H5Pclose(plist_id);
+
+    /*
+     * Close/release resources.
+     */
+    H5Sclose(filedataspace);
+    H5Sclose(memorydataspace);
+    H5Dclose(dset_id);
+    H5Fclose(file_id);
+
+    HDfree(data);
+    HDprintf("Proc %d - MpioTest2G test succeeded\n", mpi_rank);
+
+    if (mpi_rank == 0)
+        HDremove(H5FILE_NAME); /* clean up the file this test actually created */
+    return 0;
+}
+
+/*
+ * Part 1.a--Independent read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
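+ * [Note: as coded below, the dataset is dim0 x dim1 overall and each
+ * process writes a (dim0/mpi_size) x dim1 slab of it; see
+ * slab_set(..., BYROW).]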
+ */ + +void +dataset_writeInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[MAX_RANK] = { + 1, + }; /* dataset dim sizes */ + hsize_t data_size; + DATATYPE *data_array1 = NULL; /* data buffer */ + const char *filename; + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK]; + hsize_t stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* allocate memory for data buffer */ + data_size = sizeof(DATATYPE); + data_size *= (hsize_t)dim0 * (hsize_t)dim1; + data_array1 = (DATATYPE *)HDmalloc(data_size); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ---------------------------------------- + * CREATE AN HDF5 FILE WITH PARALLEL ACCESS + * ---------------------------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* --------------------------------------------- + * Define the dimensions of the overall datasets + * and the slabs local to the MPI process. + * ------------------------------------------- */ + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create a dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* + * To test the independent orders of writes between processes, all + * even number processes write to dataset1 first, then dataset2. + * All odd number processes write to dataset2 first, then dataset1. 
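+ * (Oddness is tested below with `(mpi_rank / 2) * 2 != mpi_rank`, which is
+ * just `mpi_rank % 2 != 0`.)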
+ */ + + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* write data independently */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + + /* setup dimensions again to write with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("writeInd by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("writeInd by some with zero row"); + if ((mpi_rank / 2) * 2 != mpi_rank) { + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); + } +#ifdef BARRIER_CHECKS + MPI_Barrier(test_comm); +#endif /* BARRIER_CHECKS */ + + /* release dataspace ID */ + H5Sclose(file_dataspace); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* release all IDs created */ + H5Sclose(sid); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); +} + +/* Example of using the parallel HDF5 library to read a dataset */ +void +dataset_readInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + const char *filename; + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Independent read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), 
"data_array1 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* read data independently */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* release all IDs created */ + H5Sclose(file_dataspace); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 1.b--Collective read/write for fixed dimension datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two datasets + * in one HDF5 file with collective parallel access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and + * each process controls a hyperslab within.] 
+ */ + +void +dataset_writeAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ + hid_t dataset5, dataset6, dataset7; /* Dataset ID */ + hid_t datatype; /* Datatype ID */ + hsize_t dims[MAX_RANK] = { + 1, + }; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + const char *filename; + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK]; + hsize_t stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + hsize_t current_dims; /* for point selection */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Collective write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* set up the coords array selection */ + num_points = (size_t)dim1; + coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)MAX_RANK * sizeof(hsize_t)); + VRFY((coords != NULL), "coords malloc succeeded"); + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------- + * Define the dimensions of the overall datasets + * and create the dataset + * ------------------------- */ + /* setup 2-D dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create a dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another dataset collectively */ + datatype = H5Tcopy(H5T_NATIVE_INT); + ret = H5Tset_order(datatype, H5T_ORDER_LE); + VRFY((ret >= 0), "H5Tset_order succeeded"); + + dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded"); + + /* create a third dataset collectively */ + dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset3 >= 0), "H5Dcreate2 succeeded"); + + dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset5 >= 0), "H5Dcreate2 succeeded"); + dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset6 >= 0), "H5Dcreate2 succeeded"); + dataset7 = H5Dcreate2(fid, DATASETNAME9, 
H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset7 >= 0), "H5Dcreate2 succeeded"); + + /* release 2-D space ID created */ + H5Sclose(sid); + + /* setup scalar dimensionality object */ + sid = H5Screate(H5S_SCALAR); + VRFY((sid >= 0), "H5Screate succeeded"); + + /* create a fourth dataset collectively */ + dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset4 >= 0), "H5Dcreate2 succeeded"); + + /* release scalar space ID created */ + H5Sclose(sid); + + /* + * Set up dimensions of the slab this process accesses. + */ + + /* Dataset1: each process takes a block of rows. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill the local slab with some trivial data */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + MESG("writeAll by Row"); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); + + /* setup dimensions again to writeAll with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("writeAll by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("writeAll by some with zero row"); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); + + /* release all temporary handles. */ + /* Could have used them for dataset2 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset2: each process takes a block of columns. 
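+     * (For example, with dim1 = 8 and mpi_size = 4, slab_set(..., BYCOL)
+     * gives each rank a 2-column block: rank r writes columns 2r and 2r+1.)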
     */
+    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+    /* put some trivial data in the data_array */
+    dataset_fill(start, block, data_array1);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
+    }
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset1);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
+    VRFY((mem_dataspace >= 0), "");
+
+    /* fill the local slab with some trivial data */
+    dataset_fill(start, block, data_array1);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
+    }
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* write data collectively */
+    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+    /* setup dimensions again to writeAll with zero columns for process 0 */
+    if (VERBOSE_MED)
+        HDprintf("writeAll by some with zero col\n");
+    slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+    /* need to make mem_dataspace to match for process 0 */
+    if (MAINPROCESS) {
+        ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+    }
+    MESG("writeAll by some with zero col");
+    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
+
+    /* release all temporary handles. */
+    /* Could have used them for dataset3 but it is cleaner */
+    /* to create them again.*/
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    /* Dataset3: each process takes a block of rows, except process zero uses "none" selection.
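+     * (Process 0 joins the collective H5Dwrite below via H5Sselect_none()
+     * on both the file and memory dataspaces, participating in the call
+     * while contributing no elements.)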
     */
+    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset3);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    if (MAINPROCESS) {
+        ret = H5Sselect_none(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+    } /* end if */
+    else {
+        ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+        VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+    } /* end else */
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL);
+    VRFY((mem_dataspace >= 0), "");
+    if (MAINPROCESS) {
+        ret = H5Sselect_none(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+    } /* end if */
+
+    /* fill the local slab with some trivial data */
+    dataset_fill(start, block, data_array1);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
+    } /* end if */
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* write data collectively */
+    MESG("writeAll with none");
+    ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+    /* write data collectively (with datatype conversion) */
+    MESG("writeAll with none");
+    ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+    /* release all temporary handles. */
+    /* Could have used them for dataset4 but it is cleaner */
+    /* to create them again.*/
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    /* Dataset4: process zero writes no data via a "none" selection; every other process uses the "all" selection.
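+     * (The dataspace is scalar, i.e. exactly one element, so every rank
+     * with the "all" selection writes that same single element.)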
     */
+    /* Additionally, these are in a scalar dataspace */
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset4);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    if (MAINPROCESS) {
+        ret = H5Sselect_none(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+    } /* end if */
+    else {
+        ret = H5Sselect_all(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+    } /* end else */
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate(H5S_SCALAR);
+    VRFY((mem_dataspace >= 0), "");
+    if (MAINPROCESS) {
+        ret = H5Sselect_none(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+    } /* end if */
+    else {
+        ret = H5Sselect_all(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+    } /* end else */
+
+    /* fill the local slab with some trivial data */
+    dataset_fill(start, block, data_array1);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
+    } /* end if */
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* write data collectively */
+    MESG("writeAll with scalar dataspace");
+    ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+    /* write data collectively (with datatype conversion) */
+    MESG("writeAll with scalar dataspace");
+    ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+    /* release all temporary handles.
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + block[0] = 1; + block[1] = (hsize_t)dim1; + stride[0] = 1; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; + start[1] = 0; + + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* Dataset5: point selection in File - Hyperslab selection in Memory*/ + /* create a file dataspace independently */ + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + file_dataspace = H5Dget_space(dataset5); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + start[0] = 0; + start[1] = 0; + mem_dataspace = H5Dget_space(dataset5); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset5 succeeded"); + + /* release all temporary handles. 
     */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    /* Dataset6: point selection in File - Point selection in Memory*/
+    /* create a file dataspace independently */
+    start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
+    start[1] = 0;
+    point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+    file_dataspace = H5Dget_space(dataset6);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+    VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+    start[0] = 0;
+    start[1] = 0;
+    point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+    mem_dataspace = H5Dget_space(dataset6);
+    VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+    VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* write data collectively */
+    ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    /* Dataset7: point selection in File - All selection in Memory*/
+    /* create a file dataspace independently */
+    start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank;
+    start[1] = 0;
+    point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+    file_dataspace = H5Dget_space(dataset7);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+    VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+    current_dims  = num_points;
+    mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+    VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+
+    ret = H5Sselect_all(mem_dataspace);
+    VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY((xfer_plist >= 0), "");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* write data collectively */
+    ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    /*
+     * All writes completed.
+     */
+    ret = H5Dclose(dataset1);
+    VRFY((ret >= 0), "H5Dclose1 succeeded");
+    ret = H5Dclose(dataset2);
+    VRFY((ret >= 0), "H5Dclose2 succeeded");
+    ret = H5Dclose(dataset3);
+    VRFY((ret >= 0), "H5Dclose3 succeeded");
+    ret = H5Dclose(dataset4);
+    VRFY((ret >= 0), "H5Dclose4 succeeded");
+    ret = H5Dclose(dataset5);
+    VRFY((ret >= 0), "H5Dclose5 succeeded");
+    ret = H5Dclose(dataset6);
+    VRFY((ret >= 0), "H5Dclose6 succeeded");
+    ret = H5Dclose(dataset7);
+    VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+    /* close the file collectively */
+    H5Fclose(fid);
+
+    /* release data buffers */
+    if (coords)
+        HDfree(coords);
+    if (data_array1)
+        HDfree(data_array1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read several datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0 x dim1 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+dataset_readAll(void)
+{
+    hid_t       fid;                 /* HDF5 file ID */
+    hid_t       acc_tpl;             /* File access templates */
+    hid_t       xfer_plist;          /* Dataset transfer properties list */
+    hid_t       file_dataspace;      /* File dataspace ID */
+    hid_t       mem_dataspace;       /* memory dataspace ID */
+    hid_t       dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset IDs */
+    DATATYPE   *data_array1  = NULL; /* data buffer */
+    DATATYPE   *data_origin1 = NULL; /* expected data buffer */
+    const char *filename;
+
+    hsize_t start[MAX_RANK];                   /* for hyperslab setting */
+    hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+    hsize_t block[MAX_RANK];                   /* for hyperslab setting */
+
+    size_t   num_points;    /* for point selection */
+    hsize_t *coords = NULL; /* for point selection */
+    int      i, j, k;
+
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;
+
+    MPI_Comm comm = test_comm;
+    MPI_Info info = MPI_INFO_NULL;
+
+    filename = GetTestParameters();
+    if (VERBOSE_MED)
+        HDprintf("Collective read test on file %s\n", filename);
+
+    /* set up MPI parameters */
+    MPI_Comm_size(test_comm, &mpi_size);
+    MPI_Comm_rank(test_comm, &mpi_rank);
+
+    /* set up the coords array selection */
+    num_points = (size_t)dim1;
+    coords     = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * MAX_RANK * sizeof(hsize_t));
+    VRFY((coords != NULL), "coords malloc succeeded");
+
+    /* allocate memory for data buffer */
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+    data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+    VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+    /* -------------------
+     * OPEN AN HDF5 FILE
+     * -------------------*/
+    /* setup file access template */
+    acc_tpl = create_faccess_plist(comm, info, facc_type);
+    VRFY((acc_tpl >= 0), "");
+
+    /* open the file collectively */
+    fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+    VRFY((fid >= 0), "H5Fopen succeeded");
+
+    /* Release file-access template */
+    ret = H5Pclose(acc_tpl);
+    VRFY((ret >= 0), "");
+
+    /* --------------------------
+     * Open the datasets in it
+     * ------------------------- */
+    /* open the dataset1 collectively */
+    dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+    VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+    /* open another dataset collectively */
+    dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+    VRFY((dataset2
>= 0), "H5Dopen2 2 succeeded"); + + /* open another dataset collectively */ + dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT); + VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded"); + dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT); + VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded"); + dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT); + VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded"); + + /* + * Set up dimensions of the slab this process accesses. + */ + + /* Dataset1: each process takes a block of columns. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* setup dimensions again to readAll with zero columns for process 0 */ + if (VERBOSE_MED) + HDprintf("readAll by some with zero col\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("readAll by some with zero col"); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. */ + /* Could have used them for dataset2 but it is cleaner */ + /* to create them again.*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* Dataset2: each process takes a block of rows. 
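+     * (A block of whole rows is contiguous in the file's row-major layout,
+     * so the collective layer can service this read with large contiguous
+     * accesses; the BYCOL pattern above instead produces strided,
+     * interleaved accesses.)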
*/ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset2 succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* setup dimensions again to readAll with zero rows for process 0 */ + if (VERBOSE_MED) + HDprintf("readAll by some with zero row\n"); + slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + /* need to make mem_dataspace to match for process 0 */ + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + } + MESG("readAll by some with zero row"); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
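+     */
+
+    /*
+     * Note: collective I/O requires every rank to make the H5Dread() call,
+     * even a rank whose selection is empty (the ZROW/ZCOL cases above).
+     * A zero-sized hyperslab behaves like an explicit empty selection; a
+     * sketch of the equivalent alternative:
+     *
+     *     H5Sselect_none(file_dataspace);
+     *     H5Sselect_none(mem_dataspace);
+     *
+     * Either way the rank still participates in the collective call,
+     * transferring zero elements.
+     */
+
+    /* Release the hyperslab handles before the point-selection cases.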
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + if (data_origin1) + free(data_origin1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); + + block[0] = 1; + block[1] = (hsize_t)dim1; + stride[0] = 1; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; + start[1] = 0; + + dataset_fill(start, block, data_origin1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); + } + + /* Dataset5: point selection in memory - Hyperslab selection in file*/ + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset5); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + start[0] = 0; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset5); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset5 succeeded"); + + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
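+     */
+
+    /*
+     * Note: H5Dread()/H5Dwrite() require the file and memory selections to
+     * contain the same number of elements. A cheap sanity check (a sketch,
+     * not in the original test) would be:
+     *
+     *     VRFY((H5Sget_select_npoints(mem_dataspace) ==
+     *           H5Sget_select_npoints(file_dataspace)),
+     *          "selection sizes match");
+     */
+
+    /* Done with Dataset5; release its handles.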
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* Dataset6: point selection in File - Point selection in Memory*/ + /* create a file dataspace independently */ + start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + file_dataspace = H5Dget_space(dataset6); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + start[0] = 0; + start[1] = 0; + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset6); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset6 succeeded"); + + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. 
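+     */
+
+    /*
+     * Note: for point selections, element i of the file selection maps to
+     * element i of the memory selection in the order the coordinates were
+     * listed, so permuting one list permutes where values land in memory.
+     * The IN_ORDER/OUT_OF_ORDER pairs produced by point_set() are
+     * presumably constructed so that the round trip still verifies.
+     */
+
+    /* Done with Dataset6; release its handles.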
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* Dataset7: point selection in memory - All selection in file*/ + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset7); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_all(file_dataspace); + VRFY((ret >= 0), "H5Sselect_all succeeded"); + + num_points = (size_t)dim0 * (size_t)dim1; + k = 0; + for (i = 0; i < dim0; i++) { + for (j = 0; j < dim1; j++) { + coords[k++] = (hsize_t)i; + coords[k++] = (hsize_t)j; + } + } + mem_dataspace = H5Dget_space(dataset7); + VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((ret >= 0), "H5Sselect_elements succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread dataset7 succeeded"); + + start[0] = (hsize_t)dim0 / (hsize_t)mpi_size * (hsize_t)mpi_rank; + start[1] = 0; + ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank), + data_origin1); + if (ret) + nerrors++; + + /* release all temporary handles. */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* + * All reads completed. Close datasets collectively + */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + ret = H5Dclose(dataset5); + VRFY((ret >= 0), "H5Dclose5 succeeded"); + ret = H5Dclose(dataset6); + VRFY((ret >= 0), "H5Dclose6 succeeded"); + ret = H5Dclose(dataset7); + VRFY((ret >= 0), "H5Dclose7 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 2--Independent read/write for extendible datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two extendible + * datasets in one HDF5 file with independent parallel MPIO access support. + * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. + * Each process controls only a slab of size dim0 x dim1 within each + * dataset. 
+ */ + +void +extend_writeInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[MAX_RANK]; /* dataset dim sizes */ + hsize_t max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ + hid_t dataset_pl; /* dataset create prop. list */ + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK]; /* for hyperslab setting */ + hsize_t stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Extend independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* setup chunk-size. Make sure sizes are > 0 */ + chunk_dims[0] = (hsize_t)chunkdim0; + chunk_dims[1] = (hsize_t)chunkdim1; + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* Reduce the number of metadata cache slots, so that there are cache + * collisions during the raw data I/O on the chunked dataset. This stresses + * the metadata cache and tests for cache bugs. -QAK + */ + { + int mdc_nelmts; + size_t rdcc_nelmts; + size_t rdcc_nbytes; + double rdcc_w0; + + ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0); + VRFY((ret >= 0), "H5Pget_cache succeeded"); + mdc_nelmts = 4; + ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0); + VRFY((ret >= 0), "H5Pset_cache succeeded"); + } + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. + * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + if (VERBOSE_MED) + HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); + dataset_pl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + /* start out with no rows, extend it later. 
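+     * (A dataspace with H5S_UNLIMITED maximum dimensions requires chunked
+     * storage, which is why H5Pset_chunk() was called on the creation
+     * property list above; H5Dcreate2() would otherwise fail.)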
*/ + dims[0] = dims[1] = 0; + sid = H5Screate_simple(MAX_RANK, dims, max_dims); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Extend its current dim sizes before writing */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + + /* ------------------------- + * Test writing to dataset2 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Try write to dataset2 beyond its current dim sizes. Should fail. */ + /* Temporary turn off auto error reporting */ + H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); + H5Eset_auto2(H5E_DEFAULT, NULL, NULL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently. Should fail. */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret < 0), "H5Dwrite failed as expected"); + + /* restore auto error reporting */ + H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); + H5Sclose(file_dataspace); + + /* Extend dataset2 and try again. Should succeed. 
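+     * (H5Dset_extent() modifies file metadata; in parallel HDF5 every rank
+     * must therefore make the same call with the same sizes, even though
+     * the H5Dwrite() that follows is independent.)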
*/ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset2, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* write data independently */ + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), "H5Dclose1 succeeded"); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose2 succeeded"); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); +} + +/* + * Example of using the parallel HDF5 library to create an extendable dataset + * and perform I/O on it in a way that verifies that the chunk cache is + * bypassed for parallel I/O. + */ + +void +extend_writeInd2(void) +{ + const char *filename; + hid_t fid; /* HDF5 file ID */ + hid_t fapl_id; /* File access templates */ + hid_t fs; /* File dataspace ID */ + hid_t ms; /* Memory dataspace ID */ + hid_t dataset; /* Dataset ID */ + hsize_t orig_size = 10; /* Original dataset dim size */ + hsize_t new_size = 20; /* Extended dataset dim size */ + hsize_t one = 1; + hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ + hsize_t chunk_size = 16384; /* chunk size */ + hid_t dcpl; /* dataset create prop. list */ + int written[10], /* Data to write */ + retrieved[10]; /* Data read in */ + int mpi_size, mpi_rank; /* MPI settings */ + int i; /* Local index variable */ + herr_t ret; /* Generic return value */ + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Extend independent write test #2 on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + fapl_id = create_faccess_plist(test_comm, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. 
+     * ------------------------------------------------------------- */
+
+    /* set up dataset storage chunk sizes and creation property list */
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    VRFY((dcpl >= 0), "H5Pcreate succeeded");
+    ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+    VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+    /* setup dimensionality object */
+    fs = H5Screate_simple(1, &orig_size, &max_size);
+    VRFY((fs >= 0), "H5Screate_simple succeeded");
+
+    /* create an extendible dataset collectively */
+    dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+    VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+    /* release resource */
+    ret = H5Pclose(dcpl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+    /* -------------------------
+     * Test writing to dataset
+     * -------------------------*/
+    /* create a memory dataspace independently */
+    ms = H5Screate_simple(1, &orig_size, &max_size);
+    VRFY((ms >= 0), "H5Screate_simple succeeded");
+
+    /* put some trivial data in the write buffer */
+    for (i = 0; i < (int)orig_size; i++)
+        written[i] = i;
+    MESG("data array initialized");
+    if (VERBOSE_MED) {
+        MESG("writing at offset zero: ");
+        for (i = 0; i < (int)orig_size; i++)
+            HDprintf("%s%d", i ? ", " : "", written[i]);
+        HDprintf("\n");
+    }
+    ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* -------------------------
+     * Read initial data from dataset.
+     * -------------------------*/
+    ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+    VRFY((ret >= 0), "H5Dread succeeded");
+    for (i = 0; i < (int)orig_size; i++)
+        if (written[i] != retrieved[i]) {
+            HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+                     written[i], i, retrieved[i]);
+            nerrors++;
+        }
+    if (VERBOSE_MED) {
+        MESG("read at offset zero: ");
+        for (i = 0; i < (int)orig_size; i++)
+            HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+        HDprintf("\n");
+    }
+
+    /* -------------------------
+     * Extend the dataset & retrieve new dataspace
+     * -------------------------*/
+    ret = H5Dset_extent(dataset, &new_size);
+    VRFY((ret >= 0), "H5Dset_extent succeeded");
+    ret = H5Sclose(fs);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    fs = H5Dget_space(dataset);
+    VRFY((fs >= 0), "H5Dget_space succeeded");
+
+    /* -------------------------
+     * Write to the second half of the dataset
+     * -------------------------*/
+    for (i = 0; i < (int)orig_size; i++)
+        H5_CHECKED_ASSIGN(written[i], int, orig_size + (hsize_t)i, hsize_t);
+    MESG("data array re-initialized");
+    if (VERBOSE_MED) {
+        MESG("writing at offset 10: ");
+        for (i = 0; i < (int)orig_size; i++)
+            HDprintf("%s%d", i ? ", " : "", written[i]);
+        HDprintf("\n");
+    }
+    ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+    ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* -------------------------
+     * Read the new data
+     * -------------------------*/
+    ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+    VRFY((ret >= 0), "H5Dread succeeded");
+    for (i = 0; i < (int)orig_size; i++)
+        if (written[i] != retrieved[i]) {
+            HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+                     written[i], i, retrieved[i]);
+            nerrors++;
+        }
+    if (VERBOSE_MED) {
+        MESG("read at offset 10: ");
+        for (i = 0; i < (int)orig_size; i++)
+            HDprintf("%s%d", i ?
", " : "", retrieved[i]); + HDprintf("\n"); + } + + /* Close dataset collectively */ + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + + /* Close the file collectively */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); +} + +/* Example of using the parallel HDF5 library to read an extendible dataset */ +void +extend_readInd(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[MAX_RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_array2 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ + const char *filename; + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Extend independent read test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* allocate memory for data buffer */ + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); + data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* Try extend dataset1 which is open RDONLY. Should fail. 
*/ + /* first turn off auto error reporting */ + H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); + H5Eset_auto2(H5E_DEFAULT, NULL, NULL); + + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); + VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); + dims[0]++; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret < 0), "H5Dset_extent failed as expected"); + + /* restore auto error reporting */ + H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); + H5Sclose(file_dataspace); + + /* Read dataset1 using BYROW pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset1 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + + /* Read dataset2 using BYCOL pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* read data independently */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset2 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_array2) + HDfree(data_array2); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Part 3--Collective read/write for extendible datasets. + */ + +/* + * Example of using the parallel HDF5 library to create two extendible + * datasets in one HDF5 file with collective parallel MPIO access support. 
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+extend_writeAll(void)
+{
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       xfer_plist;         /* Dataset transfer properties list */
+    hid_t       sid;                /* Dataspace ID */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
+    const char *filename;
+    hsize_t     dims[MAX_RANK];                                      /* dataset dim sizes */
+    hsize_t     max_dims[MAX_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+    DATATYPE   *data_array1 = NULL;                                  /* data buffer */
+    hsize_t     chunk_dims[MAX_RANK];                                /* chunk sizes */
+    hid_t       dataset_pl;                                          /* dataset create prop. list */
+
+    hsize_t start[MAX_RANK];  /* for hyperslab setting */
+    hsize_t count[MAX_RANK];  /* for hyperslab setting */
+    hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+    hsize_t block[MAX_RANK];  /* for hyperslab setting */
+
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;
+
+    MPI_Comm comm = test_comm;
+    MPI_Info info = MPI_INFO_NULL;
+
+    filename = GetTestParameters();
+    if (VERBOSE_MED)
+        HDprintf("Extend collective write test on file %s\n", filename);
+
+    /* set up MPI parameters */
+    MPI_Comm_size(test_comm, &mpi_size);
+    MPI_Comm_rank(test_comm, &mpi_rank);
+
+    /* setup chunk-size. Make sure sizes are > 0 */
+    chunk_dims[0] = (hsize_t)chunkdim0;
+    chunk_dims[1] = (hsize_t)chunkdim1;
+
+    /* allocate memory for data buffer */
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+    /* -------------------
+     * START AN HDF5 FILE
+     * -------------------*/
+    /* setup file access template */
+    acc_tpl = create_faccess_plist(comm, info, facc_type);
+    VRFY((acc_tpl >= 0), "");
+
+    /* Reduce the number of metadata cache slots, so that there are cache
+     * collisions during the raw data I/O on the chunked dataset. This stresses
+     * the metadata cache and tests for cache bugs. -QAK
+     */
+    {
+        int    mdc_nelmts;
+        size_t rdcc_nelmts;
+        size_t rdcc_nbytes;
+        double rdcc_w0;
+
+        ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+        VRFY((ret >= 0), "H5Pget_cache succeeded");
+        mdc_nelmts = 4;
+        ret        = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+        VRFY((ret >= 0), "H5Pset_cache succeeded");
+    }
+
+    /* create the file collectively */
+    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+    VRFY((fid >= 0), "H5Fcreate succeeded");
+
+    /* Release file-access template */
+    ret = H5Pclose(acc_tpl);
+    VRFY((ret >= 0), "");
+
+    /* --------------------------------------------------------------
+     * Define the dimensions of the overall datasets and create them.
+     * ------------------------------------------------------------- */
+
+    /* set up dataset storage chunk sizes and creation property list */
+    if (VERBOSE_MED)
+        HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+    dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+    VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+    ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
+    VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+    /* setup dimensionality object */
+    /* start out with no rows, extend it later.
*/ + dims[0] = dims[1] = 0; + sid = H5Screate_simple(MAX_RANK, dims, max_dims); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Extend its current dim sizes before writing */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret >= 0), "H5Dset_extent succeeded"); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* write data collectively */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* release resource */ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + + /* ------------------------- + * Test writing to dataset2 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* put some trivial data in the data_array */ + dataset_fill(start, block, data_array1); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Try write to dataset2 beyond its current dim sizes. Should fail. 
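+     * (With a collective transfer property list, every rank should fail
+     * this write the same way, since each rank's selection exceeds the
+     * current extent; a success/failure mismatch across ranks could
+     * otherwise hang the collective call.)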
+     */
+    /* Temporarily turn off auto error reporting */
+    H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
+    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset2);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+    /* write data collectively. Should fail. */
+    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret < 0), "H5Dwrite failed as expected");
+
+    /* restore auto error reporting */
+    H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
+    H5Sclose(file_dataspace);
+
+    /* Extend dataset2 and try again. Should succeed. */
+    dims[0] = (hsize_t)dim0;
+    dims[1] = (hsize_t)dim1;
+    ret     = H5Dset_extent(dataset2, dims);
+    VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset2);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+    /* write data collectively */
+    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* release resource */
+    ret = H5Sclose(file_dataspace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret = H5Sclose(mem_dataspace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret = H5Pclose(xfer_plist);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+    /* close dataset collectively */
+    ret = H5Dclose(dataset1);
+    VRFY((ret >= 0), "H5Dclose1 succeeded");
+    ret = H5Dclose(dataset2);
+    VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+    /* close the file collectively */
+    H5Fclose(fid);
+
+    /* release data buffers */
+    if (data_array1)
+        HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readAll(void)
+{
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       xfer_plist;         /* Dataset transfer properties list */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
+    const char *filename;
+    hsize_t     dims[MAX_RANK];      /* dataset dim sizes */
+    DATATYPE   *data_array1  = NULL; /* data buffer */
+    DATATYPE   *data_array2  = NULL; /* data buffer */
+    DATATYPE   *data_origin1 = NULL; /* expected data buffer */
+
+    hsize_t start[MAX_RANK];                   /* for hyperslab setting */
+    hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+    hsize_t block[MAX_RANK];                   /* for hyperslab setting */
+
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;
+
+    MPI_Comm comm = test_comm;
+    MPI_Info info = MPI_INFO_NULL;
+
+    filename = GetTestParameters();
+    if (VERBOSE_MED)
+        HDprintf("Extend collective read test on file %s\n", filename);
+
+    /* set up MPI parameters */
+    MPI_Comm_size(test_comm, &mpi_size);
+    MPI_Comm_rank(test_comm, &mpi_rank);
+
+    /* allocate memory for data buffer */
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+    data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+    VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+    data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 *
(size_t)dim1 * sizeof(DATATYPE)); + VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); + VRFY((fid >= 0), ""); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* open the dataset1 collectively */ + dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset1 >= 0), ""); + + /* open another dataset collectively */ + dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); + VRFY((dataset2 >= 0), ""); + + /* Try extend dataset1 which is open RDONLY. Should fail. */ + /* first turn off auto error reporting */ + H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); + H5Eset_auto2(H5E_DEFAULT, NULL, NULL); + + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); + VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); + dims[0]++; + ret = H5Dset_extent(dataset1, dims); + VRFY((ret < 0), "H5Dset_extent failed as expected"); + + /* restore auto error reporting */ + H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); + H5Sclose(file_dataspace); + + /* Read dataset1 using BYROW pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset1 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + H5Pclose(xfer_plist); + + /* Read dataset2 using BYCOL pattern */ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), ""); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), ""); + + /* create a memory dataspace independently */ + 
mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + dataset_fill(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* verify the read data with original expected data */ + ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); + VRFY((ret == 0), "dataset2 read verified correct"); + if (ret) + nerrors++; + + H5Sclose(mem_dataspace); + H5Sclose(file_dataspace); + H5Pclose(xfer_plist); + + /* close dataset collectively */ + ret = H5Dclose(dataset1); + VRFY((ret >= 0), ""); + ret = H5Dclose(dataset2); + VRFY((ret >= 0), ""); + + /* close the file collectively */ + H5Fclose(fid); + + /* release data buffers */ + if (data_array1) + HDfree(data_array1); + if (data_array2) + HDfree(data_array2); + if (data_origin1) + HDfree(data_origin1); +} + +/* + * Example of using the parallel HDF5 library to read a compressed + * dataset in an HDF5 file with collective parallel access support. + */ +#ifdef H5_HAVE_FILTER_DEFLATE +void +compress_readAll(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t dcpl; /* Dataset creation property list */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t dataspace; /* Dataspace ID */ + hid_t dataset; /* Dataset ID */ + int rank = 1; /* Dataspace rank */ + hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */ + unsigned u; /* Local index variable */ + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ + DATATYPE *data_read = NULL; /* data buffer */ + DATATYPE *data_orig = NULL; /* expected data buffer */ + const char *filename; + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Collective chunked dataset read test on file %s\n", filename); + + /* Retrieve MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Allocate data buffer */ + data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); + VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded"); + data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); + VRFY((data_read != NULL), "data_array1 HDmalloc succeeded"); + + /* Initialize data buffers */ + for (u = 0; u < dim; u++) + data_orig[u] = (DATATYPE)u; + + /* Run test both with and without filters disabled on partial chunks */ + for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Process zero creates the file with a compressed, chunked dataset */ + if (mpi_rank == 0) { + hsize_t chunk_dim; /* Chunk dimensions */ + + /* Create the file */ + fid = 
H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid > 0), "H5Fcreate succeeded"); + + /* Create property list for chunking and compression */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl > 0), "H5Pcreate succeeded"); + + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + VRFY((ret >= 0), "H5Pset_layout succeeded"); + + /* Use eight chunks */ + chunk_dim = dim / 8; + ret = H5Pset_chunk(dcpl, rank, &chunk_dim); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* Set chunk options appropriately */ + if (disable_partial_chunk_filters) { + ret = H5Pget_chunk_opts(dcpl, &chunk_opts); + VRFY((ret >= 0), "H5Pget_chunk_opts succeeded"); + + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + + ret = H5Pset_chunk_opts(dcpl, chunk_opts); + VRFY((ret >= 0), "H5Pset_chunk_opts succeeded"); + } /* end if */ + + ret = H5Pset_deflate(dcpl, 9); + VRFY((ret >= 0), "H5Pset_deflate succeeded"); + + /* Create dataspace */ + dataspace = H5Screate_simple(rank, &dim, NULL); + VRFY((dataspace > 0), "H5Screate_simple succeeded"); + + /* Create dataset */ + dataset = + H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), "H5Dcreate2 succeeded"); + + /* Write compressed data */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Close objects */ + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Sclose(dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + /* Wait for file to be created */ + MPI_Barrier(comm); + + /* ------------------- + * OPEN AN HDF5 FILE + * -------------------*/ + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* open the file collectively */ + fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); + VRFY((fid > 0), "H5Fopen succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Open dataset with compressed chunks */ + dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); + VRFY((dataset > 0), "H5Dopen2 succeeded"); + + /* Try reading & writing data */ + if (dataset > 0) { + /* Create dataset transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist > 0), "H5Pcreate succeeded"); + + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Try reading the data */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + VRFY((ret >= 0), "H5Dread succeeded"); + + /* Verify data read */ + for (u = 0; u < dim; u++) + if (data_orig[u] != data_read[u]) { + HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__, + (unsigned)u, data_orig[u], (unsigned)u, data_read[u]); + nerrors++; + } + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); + VRFY((ret >= 0), "H5Dwrite succeeded"); +#endif + + ret = H5Pclose(xfer_plist); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), 
"H5Dclose succeeded"); + } /* end if */ + + /* Close file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } /* end for */ + + /* release data buffers */ + if (data_read) + HDfree(data_read); + if (data_orig) + HDfree(data_orig); +} +#endif /* H5_HAVE_FILTER_DEFLATE */ + +/* + * Part 4--Non-selection for chunked dataset + */ + +/* + * Example of using the parallel HDF5 library to create chunked + * dataset in one HDF5 file with collective and independent parallel + * MPIO access support. The Datasets are of sizes dim0 x dim1. + * Each process controls only a slab of size dim0 x dim1 within the + * dataset with the exception that one processor selects no element. + */ + +void +none_selection_chunk(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + const char *filename; + hsize_t dims[MAX_RANK]; /* dataset dim sizes */ + DATATYPE *data_origin = NULL; /* data buffer */ + DATATYPE *data_array = NULL; /* data buffer */ + hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ + hid_t dataset_pl; /* dataset create prop. list */ + + hsize_t start[MAX_RANK]; /* for hyperslab setting */ + hsize_t count[MAX_RANK]; /* for hyperslab setting */ + hsize_t stride[MAX_RANK]; /* for hyperslab setting */ + hsize_t block[MAX_RANK]; /* for hyperslab setting */ + hsize_t mstart[MAX_RANK]; /* for data buffer in memory */ + + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + filename = GetTestParameters(); + if (VERBOSE_MED) + HDprintf("Extend independent write test on file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + /* setup chunk-size. Make sure sizes are > 0 */ + chunk_dims[0] = (hsize_t)chunkdim0; + chunk_dims[1] = (hsize_t)chunkdim1; + + /* ------------------- + * START AN HDF5 FILE + * -------------------*/ + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* -------------------------------------------------------------- + * Define the dimensions of the overall datasets and create them. 
+ * ------------------------------------------------------------- */ + + /* set up dataset storage chunk sizes and creation property list */ + if (VERBOSE_MED) + HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); + dataset_pl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create an extendible dataset collectively */ + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + /* create another extendible dataset collectively */ + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); + + /* release resource */ + H5Sclose(sid); + H5Pclose(dataset_pl); + + /* ------------------------- + * Test collective writing to dataset1 + * -------------------------*/ + /* set up dimensions of the slab this process accesses */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + /* allocate memory for data buffer. Only allocate enough buffer for + * each processor's data. */ + if (mpi_rank) { + data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); + VRFY((data_origin != NULL), "data_origin HDmalloc succeeded"); + + data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + + /* put some trivial data in the data_array */ + mstart[0] = mstart[1] = 0; + dataset_fill(mstart, block, data_origin); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(mstart, block, data_origin); + } + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(MAX_RANK, block, NULL); + VRFY((mem_dataspace >= 0), ""); + + /* Process 0 has no selection */ + if (!mpi_rank) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset1); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Process 0 has no selection */ + if (!mpi_rank) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_none succeeded"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* write data collectively */ + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* read data independently */ + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); + VRFY((ret >= 0), ""); + + /* verify the read data with original expected data */ + if (mpi_rank) { + ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); + if (ret) + nerrors++; + } + + /* 
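-------------------------
+ * (Note on the block above: a rank can sit out a collective transfer by
+ * selecting nothing in BOTH the memory and the file dataspace -- the
+ * H5Sselect_none() calls -- but it must still take part in the collective
+ * H5Dwrite()/H5Dread() itself. A minimal sketch of the pattern, reusing
+ * the identifiers above:
+ *
+ *     if (!mpi_rank) {
+ *         H5Sselect_none(mem_dataspace);
+ *         H5Sselect_none(file_dataspace);
+ *     }
+ *     ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace,
+ *                    file_dataspace, xfer_plist, data_origin);
+ *
+ * Rank 0 makes the call like everyone else and simply transfers zero
+ * elements.)
+ *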
+ * Test independent writing to dataset2
+ * -------------------------*/
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+    /* write data independently */
+    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* read data independently */
+    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
+    VRFY((ret >= 0), "H5Dread succeeded");
+
+    /* verify the read data with original expected data */
+    if (mpi_rank) {
+        ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+        if (ret)
+            nerrors++;
+    }
+
+    /* release resource */
+    ret = H5Sclose(file_dataspace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret = H5Sclose(mem_dataspace);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret = H5Pclose(xfer_plist);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+    /* close dataset collectively */
+    ret = H5Dclose(dataset1);
+    VRFY((ret >= 0), "H5Dclose1 succeeded");
+    ret = H5Dclose(dataset2);
+    VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+    /* close the file collectively */
+    H5Fclose(fid);
+
+    /* release data buffers */
+    if (data_origin)
+        HDfree(data_origin);
+    if (data_array)
+        HDfree(data_array);
+}
+
+/* Function: test_actual_io_mode
+ *
+ * Purpose: Tests one specific case of collective I/O and checks that the
+ *          actual_chunk_opt_mode property and the actual_io_mode
+ *          property in the DXPL have the correct values.
+ *
+ * Input:   selection_mode: changes the way processes select data from the space, as well
+ *          as some dxpl flags to get collective I/O to break in different ways.
+ *
+ *          The relevant I/O function and expected response for each mode:
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ *                  H5D_mpi_chunk_collective_io, each process reports independent I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ *                  H5D_mpi_chunk_collective_io, each process reports collective I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ *                  H5D_mpi_chunk_collective_io, each process reports mixed I/O
+ *
+ *              TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ *                  H5D_mpi_chunk_collective_io, processes disagree. The root reports
+ *                  collective, the rest report independent I/O
+ *
+ *              TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ *                  Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND, but set to go
+ *                  directly to multi-chunk I/O without the num-threshold calculation.
+ *              TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ *                  Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL, but set to go
+ *                  directly to multi-chunk I/O without the num-threshold calculation.
+ *
+ *              TEST_ACTUAL_IO_LINK_CHUNK:
+ *                  H5D_link_chunk_collective_io, processes report linked chunk I/O
+ *
+ *              TEST_ACTUAL_IO_CONTIGUOUS:
+ *                  H5D__contig_collective_write or H5D__contig_collective_read,
+ *                  each process reports contiguous collective I/O
+ *
+ *              TEST_ACTUAL_IO_NO_COLLECTIVE:
+ *                  Simple independent I/O. This tests that the defaults are properly set.
+ *
+ *              TEST_ACTUAL_IO_RESET:
+ *                  Performs collective and then independent I/O with the same dxpl to
+ *                  make sure the property is correctly reset to the default on each use.
+ *                  Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE
+ *                  (the most complex case that works on all builds) and then performs
+ *                  an independent read and write with the same dxpls.
+ *
+ *          Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ *          are not needed, as they are covered by the MULTI_CHUNK_MIX and
+ *          MULTI_CHUNK_MIX_DISAGREE cases.
_DIRECT_ cases are only for testing + * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold. + * + * Modification: + * - Refctore to remove multi-chunk-without-opimization test and update for + * testing direct to multi-chunk-io + * Programmer: Jonathan Kim + * Date: 2012-10-10 + * + * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +static void +test_actual_io_mode(int selection_mode) +{ + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + const char *filename; + const char *test_name; + hbool_t direct_multi_chunk_io; + hbool_t multi_chunk_io; + hbool_t is_chunked; + hbool_t is_collective; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int *buffer; + int i; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl_id = -1; + hid_t mem_space = -1; + hid_t file_space = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[MAX_RANK]; + hsize_t chunk_dims[MAX_RANK]; + hsize_t start[MAX_RANK]; + hsize_t stride[MAX_RANK]; + hsize_t count[MAX_RANK]; + hsize_t block[MAX_RANK]; + char message[256]; + herr_t ret; + + /* Set up some flags to make some future if statements slightly more readable */ + direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + + /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then + * tests independent I/O + */ + multi_chunk_io = + (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET); + + is_chunked = + (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); + + is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; + + /* Set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + MPI_Barrier(test_comm); + + HDassert(mpi_size >= 1); + + mpi_comm = test_comm; + mpi_info = MPI_INFO_NULL; + + filename = (const char *)GetTestParameters(); + HDassert(filename != NULL); + + /* Setup the file access template */ + fapl_id = create_faccess_plist(mpi_comm, mpi_info, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist() succeeded"); + + /* Create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Create the basic Space */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* Create the dataset creation plist */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation plist created successfully"); + + /* If we are not testing contiguous datasets */ + if (is_chunked) { + /* Set up chunk information. 
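+     * With chunk_dims = { dims[0] / mpi_size, dims[1] } the dataset holds
+     * exactly one chunk per rank-sized block of rows; e.g. (sketch) for
+     * mpi_size = 4 and dims = {8, 8}:
+     *
+     *     chunk_dims = {2, 8}  ->  chunk 0 = rows 0-1, chunk 1 = rows 2-3, ...
+     *
+     * A BYROW selection therefore puts each rank alone in one chunk
+     * (per-chunk independent I/O), while a BYCOL selection makes every rank
+     * touch every chunk (per-chunk collective I/O) -- the geometry that the
+     * MULTI_CHUNK_IND and MULTI_CHUNK_COL expectations below rely on.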
*/ + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; + chunk_dims[1] = dims[1]; + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* Create the dataset */ + dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + + /* Create the file dataspace */ + file_space = H5Dget_space(dataset); + VRFY((file_space >= 0), "H5Dget_space succeeded"); + + /* Choose a selection method based on the type of I/O we want to occur, + * and also set up some selection-dependeent test info. */ + switch (selection_mode) { + + /* Independent I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_IND: + case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: + /* Since the dataset is chunked by row and each process selects a row, + * each process writes to a different chunk. This forces all I/O to be + * independent. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Multi Chunk - Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + break; + + /* Collective I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_COL: + case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: + /* The dataset is chunked by rows, so each process takes a column which + * spans all chunks. Since the processes write non-overlapping regular + * selections to each chunk, the operation is purely collective. + */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + + test_name = "Multi Chunk - Collective"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + if (mpi_size > 1) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + break; + + /* Mixed I/O with optimization */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O, have the root select all chunks and each + * subsequent process select the first and nth chunk. The first chunk, + * accessed by all, will be assigned collective I/O while each other chunk + * will be accessed only by the root and the nth process and will be + * assigned independent I/O. Each process will access one chunk collectively + * and at least one chunk independently, reporting mixed I/O. + */ + + if (mpi_rank == 0) { + /* Select the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + } + else { + /* Select the first and the nth chunk in the nth column */ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + } + + test_name = "Multi Chunk - Mixed"; + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + break; + + /* RESET tests that the properties are properly reset to defaults each time I/O is + * performed. To achieve this, we have RESET perform collective I/O (which would change + * the values from the defaults) followed by independent I/O (which should report the + * default values). RESET doesn't need to have a unique selection, so we reuse + * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works + * on all builds. The independent section of RESET can be found at the end of this function. 
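+         * In outline, the reset check at the end of this function is (sketch,
+         * reusing the dxpl and dataset created below):
+         *
+         *     H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+         *     H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer);
+         *     H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+         *     H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+         *
+         * after which the two values must read back as the defaults,
+         * H5D_MPIO_NO_COLLECTIVE and H5D_MPIO_NO_CHUNK_OPTIMIZATION.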
+ */ + case TEST_ACTUAL_IO_RESET: + + /* Mixed I/O with optimization and internal disagreement */ + case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: + /* A chunk will be assigned collective I/O only if it is selected by each + * process. To get mixed I/O with disagreement, assign process n to the + * first chunk and the nth chunk. The first chunk, selected by all, is + * assgigned collective I/O, while each other process gets independent I/O. + * Since the root process with only access the first chunk, it will report + * collective I/O. The subsequent processes will access the first chunk + * collectively, and their other chunk independently, reporting mixed I/O. + */ + + if (mpi_rank == 0) { + /* Select the first chunk in the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + block[0] = block[0] / (hsize_t)mpi_size; + } + else { + /* Select the first and the nth chunk in the nth column */ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; + stride[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + } + + /* If the testname was not already set by the RESET case */ + if (selection_mode == TEST_ACTUAL_IO_RESET) + test_name = "RESET"; + else + test_name = "Multi Chunk - Mixed (Disagreement)"; + + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; + if (mpi_size > 1) { + if (mpi_rank == 0) + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + else + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + } + else + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + + break; + + /* Linked Chunk I/O */ + case TEST_ACTUAL_IO_LINK_CHUNK: + /* Nothing special; link chunk I/O is forced in the dxpl settings. */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Link Chunk"; + actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + break; + + /* Contiguous Dataset */ + case TEST_ACTUAL_IO_CONTIGUOUS: + /* A non overlapping, regular selection in a contiguous dataset leads to + * collective I/O */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Contiguous"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + break; + + case TEST_ACTUAL_IO_NO_COLLECTIVE: + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); + + test_name = "Independent"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + break; + + default: + test_name = "Undefined Selection Mode"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + break; + } + + ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Create a memory dataspace mirroring the dataset and select the same hyperslab + * as in the file space. 
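+     * (Any transfer requires the memory and file selections to contain the
+     * same number of elements; selecting the identical hyperslab in a memory
+     * space of equal extent guarantees that. A quick invariant check, if one
+     * were needed, is (sketch):
+     *
+     *     HDassert(H5Sget_select_npoints(mem_space) ==
+     *              H5Sget_select_npoints(file_space));
+     * )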
+ */ + mem_space = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((mem_space >= 0), "mem_space created"); + + ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* Get the number of elements in the selection */ + length = dim0 * dim1; + + /* Allocate and initialize the buffer */ + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) + buffer[i] = i; + + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + /* Set collective I/O properties in the dxpl. */ + if (is_collective) { + /* Request collective I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Set the threshold number of processes per chunk to twice mpi_size. + * This will prevent the threshold from ever being met, thus forcing + * multi chunk io instead of link chunk io. + * This is via default. + */ + if (multi_chunk_io) { + /* force multi-chunk-io by threshold */ + ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); + + /* set this to manipulate testing scenario about allocating processes + * to chunks */ + ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); + } + + /* Set directly go to multi-chunk-io without threshold calc. */ + if (direct_multi_chunk_io) { + /* set for multi chunk io by property*/ + ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + } + + /* Make a copy of the dxpl to test the read operation */ + dxpl_read = H5Pcopy(dxpl_write); + VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Retrieve Actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + /* Read */ + ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Retrieve Actual io values */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + /* Check write vs read */ + VRFY((actual_io_mode_read == actual_io_mode_write), + "reading and writing are the same for actual_io_mode"); + VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), + "reading and writing are the same for actual_chunk_opt_mode"); + + /* Test values */ + if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 && + actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) { + HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for 
%s.\n", + test_name); + VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); + HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name); + VRFY((actual_io_mode_write == actual_io_mode_expected), message); + } + else { + HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write, + actual_io_mode_write); + } + + /* To test that the property is successfully reset to the default, we perform some + * independent I/O after the collective I/O + */ + if (selection_mode == TEST_ACTUAL_IO_RESET) { + if (mpi_rank == 0) { + /* Switch to independent io */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Write */ + ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset write (independent)"); + VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset write (independent)"); + + /* Read */ + ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Check Properties */ + ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); + ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + + VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, + "actual_chunk_opt_mode has correct value for reset read (independent)"); + VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, + "actual_io_mode has correct value for reset read (independent)"); + } + } + + /* Release some resources */ + ret = H5Sclose(sid); + ret = H5Pclose(fapl_id); + ret = H5Pclose(dcpl); + ret = H5Pclose(dxpl_write); + ret = H5Pclose(dxpl_read); + ret = H5Dclose(dataset); + ret = H5Sclose(mem_space); + ret = H5Sclose(file_space); + ret = H5Fclose(fid); + HDfree(buffer); + return; +} + +/* Function: actual_io_mode_tests + * + * Purpose: Tests all possible cases of the actual_io_mode property. + * + * Programmer: Jacob Gruber + * Date: 2011-04-06 + */ +void +actual_io_mode_tests(void) +{ + int mpi_size = -1; + int mpi_rank = -1; + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_size(test_comm, &mpi_rank); + + test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + + /* + * Test multi-chunk-io via proc_num threshold + */ + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + + /* The Multi Chunk Mixed test requires at least three processes. 
*/
+    if (mpi_size > 2)
+        test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+    else
+        HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
+
+    test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+
+    /*
+     * Test multi-chunk-io via setting direct property
+     */
+    test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+    test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+    test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+    test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+
+    test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+    return;
+}
+
+/*
+ * Function: test_no_collective_cause_mode
+ *
+ * Purpose:
+ *    Tests causes of broken collective I/O and checks that the values
+ *    reported by H5Pget_mpio_no_collective_cause in the DXPL are correct.
+ *
+ * Input:
+ *    selection_mode: the mode(s) used to cause broken collective I/O.
+ *    Note: Originally, each TEST case was meant to be used alone. This was
+ *          later updated so that multiple TEST cases can be combined with
+ *          '|'. There is, however, no error checking for combined cases,
+ *          so the tester is responsible for feeding in sensible
+ *          combinations of TESTs.
+ *
+ *
+ *    TEST_COLLECTIVE:
+ *        Test for regular collective I/O without any cause of breaking.
+ *        Just to test normal behavior.
+ *
+ *    TEST_SET_INDEPENDENT:
+ *        Test for independent I/O as the cause of breaking collective I/O.
+ *
+ *    TEST_DATATYPE_CONVERSION:
+ *        Test for datatype conversion as the cause of breaking collective I/O.
+ *
+ *    TEST_DATA_TRANSFORMS:
+ *        Test for the data transform feature as the cause of breaking collective I/O.
+ *
+ *    TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
+ *        Test for a NULL dataspace as the cause of breaking collective I/O.
+ *
+ *    TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
+ *        Test for compact layout as the cause of breaking collective I/O.
+ *
+ *    TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
+ *        Test for external-file storage as the cause of breaking collective I/O.
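+ *
+ * The core pattern exercised throughout is the cause query issued right
+ * after a transfer (sketch, with dxpl_id the transfer property list):
+ *
+ *     uint32_t local_cause, global_cause;
+ *     H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause);
+ *
+ * local_cause == H5D_MPIO_COLLECTIVE means this rank saw no breakage;
+ * global_cause reports the causes as seen across all ranks.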
+ * + * Programmer: Jonathan Kim + * Date: Aug, 2012 + */ +#define FILE_EXTERNAL "nocolcause_extern.data" +static void +test_no_collective_cause_mode(int selection_mode) +{ + uint32_t no_collective_cause_local_write = 0; + uint32_t no_collective_cause_local_read = 0; + uint32_t no_collective_cause_local_expected = 0; + uint32_t no_collective_cause_global_write = 0; + uint32_t no_collective_cause_global_read = 0; + uint32_t no_collective_cause_global_expected = 0; + // hsize_t coord[NELM][MAX_RANK]; + + const char *filename; + const char *test_name; + hbool_t is_chunked = 1; + hbool_t is_independent = 0; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int *buffer; + int i; + MPI_Comm mpi_comm; + MPI_Info mpi_info; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl_id = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[MAX_RANK]; + hid_t mem_space = -1; + hid_t file_space = -1; + hsize_t chunk_dims[MAX_RANK]; + herr_t ret; + /* set to global value as default */ + int l_facc_type = facc_type; + char message[256]; + + /* Set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + MPI_Barrier(test_comm); + + HDassert(mpi_size >= 1); + + mpi_comm = test_comm; + mpi_info = MPI_INFO_NULL; + + /* Create the dataset creation plist */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl >= 0), "dataset creation plist created successfully"); + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { + ret = H5Pset_layout(dcpl, H5D_COMPACT); + VRFY((ret >= 0), "set COMPACT layout succeeded"); + is_chunked = 0; + } + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED); + VRFY((ret >= 0), "set EXTERNAL file layout succeeded"); + is_chunked = 0; + } + + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { + sid = H5Screate(H5S_NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + is_chunked = 0; + } + else { + /* Create the basic Space */ + /* if this is a compact dataset, create a small dataspace that does not exceed 64K */ + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { + dims[0] = BIG_X_FACTOR * 6; + dims[1] = BIG_Y_FACTOR * 6; + } + else { + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + } + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + } + + filename = (const char *)GetTestParameters(); + HDassert(filename != NULL); + + /* Setup the file access template */ + fapl_id = create_faccess_plist(mpi_comm, mpi_info, l_facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist() succeeded"); + + /* Create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* If we are not testing contiguous datasets */ + if (is_chunked) { + /* Set up chunk information. 
*/ + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; + chunk_dims[1] = dims[1]; + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* Create the dataset */ + dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); + + /* + * Set expected causes and some tweaks based on the type of test + */ + if (selection_mode & TEST_DATATYPE_CONVERSION) { + test_name = "Broken Collective I/O - Datatype Conversion"; + no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; + no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; + /* set different sign to trigger type conversion */ + data_type = H5T_NATIVE_UINT; + } + + if (selection_mode & TEST_DATA_TRANSFORMS) { + test_name = "Broken Collective I/O - DATA Transforms"; + no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; + no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; + } + + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { + test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; + no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; + } + + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || + selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; + no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; + } + + if (selection_mode & TEST_COLLECTIVE) { + test_name = "Broken Collective I/O - Not Broken"; + no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; + no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; + } + + if (selection_mode & TEST_SET_INDEPENDENT) { + test_name = "Broken Collective I/O - Independent"; + no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; + no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; + /* switch to independent io */ + is_independent = 1; + } + + /* use all spaces for certain tests */ + if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || + selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { + file_space = H5S_ALL; + mem_space = H5S_ALL; + } + else { + /* Get the file dataspace */ + file_space = H5Dget_space(dataset); + VRFY((file_space >= 0), "H5Dget_space succeeded"); + + /* Create the memory dataspace */ + mem_space = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((mem_space >= 0), "mem_space created"); + } + + /* Get the number of elements in the selection */ + H5_CHECKED_ASSIGN(length, int, dims[0] * dims[1], hsize_t); + + /* Allocate and initialize the buffer */ + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) + buffer[i] = i; + + /* Set up the dxpl for the write */ + dxpl_write = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); + + if (is_independent) { + /* Set Independent I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + else { + /* Set Collective I/O */ + ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + } + + if (selection_mode & 
TEST_DATA_TRANSFORMS) { + ret = H5Pset_data_transform(dxpl_write, "x+1"); + VRFY((ret >= 0), "H5Pset_data_transform succeeded"); + } + + /*--------------------- + * Test Write access + *---------------------*/ + + /* Write */ + ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); + + /* Get the cause of broken collective I/O */ + ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write, + &no_collective_cause_global_write); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); + + /*--------------------- + * Test Read access + *---------------------*/ + + /* Make a copy of the dxpl to test the read operation */ + dxpl_read = H5Pcopy(dxpl_write); + VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); + + /* Read */ + ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); + + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); + VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); + + /* Get the cause of broken collective I/O */ + ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read, + &no_collective_cause_global_read); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); + + /* Check write vs read */ + VRFY((no_collective_cause_local_read == no_collective_cause_local_write), + "reading and writing are the same for local cause of Broken Collective I/O"); + VRFY((no_collective_cause_global_read == no_collective_cause_global_write), + "reading and writing are the same for global cause of Broken Collective I/O"); + + /* Test values */ + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name); + VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message); + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name); + VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message); + + /* Release some resources */ + if (sid) + H5Sclose(sid); + if (fapl_id) + H5Pclose(fapl_id); + if (dcpl) + H5Pclose(dcpl); + if (dxpl_write) + H5Pclose(dxpl_write); + if (dxpl_read) + H5Pclose(dxpl_read); + if (dataset) + H5Dclose(dataset); + if (mem_space) + H5Sclose(mem_space); + if (file_space) + H5Sclose(file_space); + if (fid) + H5Fclose(fid); + HDfree(buffer); + + /* clean up external file */ + if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) + HDremove(FILE_EXTERNAL); + + return; +} + +/* Function: no_collective_cause_tests + * + * Purpose: Tests cases for broken collective IO. 
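+ *
+ * Causes can be combined by OR-ing the TEST_* flags into one call, in
+ * which case the expected cause masks accumulate the same way (sketch):
+ *
+ *     test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ *     ... expected mask: H5D_MPIO_DATATYPE_CONVERSION | H5D_MPIO_DATA_TRANSFORMS ...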
+ * + * Programmer: Jonathan Kim + * Date: Aug, 2012 + */ +void +no_collective_cause_tests(void) +{ + /* + * Test individual cause + */ + test_no_collective_cause_mode(TEST_COLLECTIVE); + test_no_collective_cause_mode(TEST_SET_INDEPENDENT); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); + + /* + * Test combined causes + */ + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | + TEST_DATA_TRANSFORMS); + + return; +} + +/* + * Test consistency semantics of atomic mode + */ + +/* + * Example of using the parallel HDF5 library to create a dataset, + * where process 0 writes and the other processes read at the same + * time. If atomic mode is set correctly, the other processes should + * read the old values in the dataset or the new ones. + */ + +void +dataset_atomicity(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t dataset1; /* Dataset IDs */ + hsize_t dims[MAX_RANK]; /* dataset dim sizes */ + int *write_buf = NULL; /* data buffer */ + int *read_buf = NULL; /* data buffer */ + int buf_size; + hid_t dataset2; + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* Memory dataspace ID */ + hsize_t start[MAX_RANK]; + hsize_t stride[MAX_RANK]; + hsize_t count[MAX_RANK]; + hsize_t block[MAX_RANK]; + const char *filename; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + int i, j, k; + hbool_t atomicity = FALSE; + MPI_Comm comm = test_comm; + MPI_Info info = MPI_INFO_NULL; + + dim0 = 64; + dim1 = 32; + filename = GetTestParameters(); + if (facc_type != FACC_MPIO) { + HDprintf("Atomicity tests will not work without the MPIO VFD\n"); + return; + } + if (VERBOSE_MED) + HDprintf("atomic writes to file %s\n", filename); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + buf_size = dim0 * dim1; + /* allocate memory for data buffer */ + write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); + /* allocate memory for data buffer */ + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* setup dimensionality object */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* create datasets */ + dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); + + dataset2 = H5Dcreate2(fid, DATASETNAME6, 
H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+    /* initialize datasets to 0s */
+    if (0 == mpi_rank) {
+        ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+        VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+        ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+        VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+    }
+
+    ret = H5Dclose(dataset1);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+    ret = H5Dclose(dataset2);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+    ret = H5Sclose(sid);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    ret = H5Fclose(fid);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+
+    MPI_Barrier(comm);
+
+    /* make sure setting atomicity fails on a serial file ID */
+    /* file locking allows only one file open (serial) for writing */
+    if (MAINPROCESS) {
+        fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+        VRFY((fid >= 0), "H5Fopen succeeded");
+    }
+
+    /* should fail: the file is open serially on process 0 only (on the
+     * other ranks fid is a stale, already-closed ID, so the call fails
+     * there as well) */
+    ret = H5Fset_mpi_atomicity(fid, TRUE);
+    VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+
+    if (MAINPROCESS) {
+        ret = H5Fclose(fid);
+        VRFY((ret >= 0), "H5Fclose succeeded");
+    }
+
+    MPI_Barrier(comm);
+
+    /* setup file access template */
+    acc_tpl = create_faccess_plist(comm, info, facc_type);
+    VRFY((acc_tpl >= 0), "");
+
+    /* open the file collectively */
+    fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+    VRFY((fid >= 0), "H5Fopen succeeded");
+
+    /* Release file-access template */
+    ret = H5Pclose(acc_tpl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+    ret = H5Fset_mpi_atomicity(fid, TRUE);
+    VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
+
+    /* open dataset1 (contiguous case) */
+    dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
+    VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+    if (0 == mpi_rank) {
+        for (i = 0; i < buf_size; i++) {
+            write_buf[i] = 5;
+        }
+    }
+    else {
+        for (i = 0; i < buf_size; i++) {
+            read_buf[i] = 8;
+        }
+    }
+
+    /* check that the atomicity flag is set */
+    ret = H5Fget_mpi_atomicity(fid, &atomicity);
+    VRFY((ret >= 0), "H5Fget_mpi_atomicity succeeded");
+    VRFY((atomicity == TRUE), "atomicity flag was set correctly");
+
+    MPI_Barrier(comm);
+
+    /* Process 0 writes contiguously to the entire dataset */
+    if (0 == mpi_rank) {
+        ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+        VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+    }
+    /* The other processes read the entire dataset */
+    else {
+        ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+        VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+    }
+
+    if (VERBOSE_MED) {
+        i = 0;
+        j = 0;
+        k = 0;
+        for (i = 0; i < dim0; i++) {
+            HDprintf("\n");
+            for (j = 0; j < dim1; j++)
+                HDprintf("%d ", read_buf[k++]);
+        }
+    }
+
+    /* The processes that read the dataset must either read all values
+       as 0 (read happened before process 0 wrote to dataset 1), or 5
+       (read happened after process 0 wrote to dataset 1) */
+    if (0 != mpi_rank) {
+        int compare = read_buf[0];
+
+        VRFY((compare == 0 || compare == 5),
+             "Atomicity test failed: value read should be 0 or 5");
+        for (i = 1; i < buf_size; i++) {
+            if (read_buf[i] != compare) {
+                HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+                         read_buf[i], compare);
+                nerrors++;
+            }
+        }
+    }
+
+    ret = H5Dclose(dataset1);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+
+    /* release data buffers */
+    if (write_buf)
+        HDfree(write_buf);
+    if (read_buf)
HDfree(read_buf); + + /* open dataset2 (non-contiguous case) */ + dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT); + VRFY((dataset2 >= 0), "H5Dopen2 succeeded"); + + /* allocate memory for data buffer */ + write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); + /* allocate memory for data buffer */ + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); + VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); + + for (i = 0; i < buf_size; i++) { + write_buf[i] = 5; + } + for (i = 0; i < buf_size; i++) { + read_buf[i] = 8; + } + + atomicity = FALSE; + /* check that the atomicity flag is set */ + ret = H5Fget_mpi_atomicity(fid, &atomicity); + VRFY((ret >= 0), "atomcity get failed"); + VRFY((atomicity == TRUE), "atomcity set failed"); + + block[0] = (hsize_t)(dim0 / mpi_size) - 1; + block[1] = (hsize_t)(dim1 / mpi_size) - 1; + stride[0] = block[0] + 1; + stride[1] = block[1] + 1; + count[0] = (hsize_t)mpi_size; + count[1] = (hsize_t)mpi_size; + start[0] = 0; + start[1] = 0; + + /* create a file dataspace */ + file_dataspace = H5Dget_space(dataset2); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace */ + mem_dataspace = H5Screate_simple(MAX_RANK, dims, NULL); + VRFY((mem_dataspace >= 0), ""); + + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + MPI_Barrier(comm); + + /* Process 0 writes to the dataset */ + if (0 == mpi_rank) { + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf); + VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); + } + /* All processes wait for the write to finish. 
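+       (sketch of the guarantee relied on here, established earlier in this
+       test:
+
+           H5Fset_mpi_atomicity(fid, TRUE);        collective call on all ranks
+           H5Fget_mpi_atomicity(fid, &atomicity);  read back; must be TRUE
+
+       with atomic mode on, rank 0's write is visible in its entirety to the
+       readers ordered after the barrier.)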
This works because + atomicity is set to true */ + MPI_Barrier(comm); + /* The other processes read the entire dataset */ + if (0 != mpi_rank) { + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf); + VRFY((ret >= 0), "H5Dread dataset2 succeeded"); + } + + if (VERBOSE_MED) { + if (mpi_rank == 1) { + i = 0; + j = 0; + k = 0; + for (i = 0; i < dim0; i++) { + HDprintf("\n"); + for (j = 0; j < dim1; j++) + HDprintf("%d ", read_buf[k++]); + } + HDprintf("\n"); + } + } + + /* The processes that read the dataset must either read all values + as 5 (read happened after process 0 wrote to dataset 1) */ + if (0 != mpi_rank) { + int compare; + i = 0; + j = 0; + k = 0; + + compare = 5; + + for (i = 0; i < dim0; i++) { + if ((hsize_t)i >= (hsize_t)mpi_rank * (block[0] + 1)) { + break; + } + if (((hsize_t)i + 1) % (block[0] + 1) == 0) { + k += dim1; + continue; + } + for (j = 0; j < dim1; j++) { + if ((hsize_t)j >= (hsize_t)mpi_rank * (block[1] + 1)) { + H5_CHECKED_ASSIGN(k, int, (hsize_t)dim1 - (hsize_t)mpi_rank * (block[1] + 1) + (hsize_t)k, + hsize_t); + break; + } + if (((hsize_t)j + 1) % (block[1] + 1) == 0) { + k++; + continue; + } + else if (compare != read_buf[k]) { + HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, + k, read_buf[k], compare); + nerrors++; + } + k++; + } + } + } + + ret = H5Dclose(dataset2); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Sclose(file_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Sclose(mem_dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + + /* release data buffers */ + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); +} + +/* Function: dense_attr_test + * + * Purpose: Test cases for writing dense attributes in parallel + * + * Programmer: Quincey Koziol + * Date: April, 2013 + */ +void +test_dense_attr(void) +{ + int mpi_size, mpi_rank; + hid_t fpid, fid; + hid_t gid, gpid; + hid_t atFileSpace, atid; + hsize_t atDims[1] = {10000}; + herr_t status; + const char *filename; + + /* get filename */ + filename = (const char *)GetTestParameters(); + HDassert(filename != NULL); + + /* set up MPI parameters */ + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + fpid = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fpid > 0), "H5Pcreate succeeded"); + status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); + VRFY((status >= 0), "H5Pset_libver_bounds succeeded"); + status = H5Pset_fapl_mpio(fpid, test_comm, MPI_INFO_NULL); + VRFY((status >= 0), "H5Pset_fapl_mpio succeeded"); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid); + VRFY((fid > 0), "H5Fcreate succeeded"); + status = H5Pclose(fpid); + VRFY((status >= 0), "H5Pclose succeeded"); + + gpid = H5Pcreate(H5P_GROUP_CREATE); + VRFY((gpid > 0), "H5Pcreate succeeded"); + status = H5Pset_attr_phase_change(gpid, 0, 0); + VRFY((status >= 0), "H5Pset_attr_phase_change succeeded"); + gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT); + VRFY((gid > 0), "H5Gcreate2 succeeded"); + status = H5Pclose(gpid); + VRFY((status >= 0), "H5Pclose succeeded"); + + atFileSpace = H5Screate_simple(1, atDims, NULL); + VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); + atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); + VRFY((atid > 0), "H5Acreate succeeded"); + status = H5Sclose(atFileSpace); + VRFY((status >= 0), "H5Sclose succeeded"); + + status 
= H5Aclose(atid); + VRFY((status >= 0), "H5Aclose succeeded"); + + status = H5Gclose(gid); + VRFY((status >= 0), "H5Gclose succeeded"); + status = H5Fclose(fid); + VRFY((status >= 0), "H5Fclose succeeded"); + + return; +} + +int +main(int argc, char **argv) +{ + int express_test; + int mpi_size, mpi_rank; /* mpi variables */ + hsize_t oldsize, newsize = 1048576; + +#ifndef H5_HAVE_WIN32_API + /* Un-buffer the stdout and stderr */ + HDsetbuf(stderr, NULL); + HDsetbuf(stdout, NULL); +#endif + + MPI_Init(&argc, &argv); + MPI_Comm_size(test_comm, &mpi_size); + MPI_Comm_rank(test_comm, &mpi_rank); + + HDmemset(filenames, 0, sizeof(filenames)); + + dim0 = BIG_X_FACTOR; + dim1 = BIG_Y_FACTOR; + dim2 = BIG_Z_FACTOR; + + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("2 GByte IO TESTS START\n"); + HDprintf("2 MPI ranks will run the tests...\n"); + HDprintf("===================================\n"); + h5_show_hostname(); + } + + if (H5dont_atexit() < 0) { + HDprintf("Failed to turn off atexit processing. Continue.\n"); + }; + H5open(); + + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } + + /* Set the internal transition size to allow use of derived datatypes + * without having to actually read or write large datasets (>2GB). + */ + oldsize = H5_mpi_set_bigio_count(newsize); + + if (mpi_size > 2) { + int rank_color = 0; + if (mpi_rank >= 2) + rank_color = 1; + if (MPI_Comm_split(test_comm, rank_color, mpi_rank, &test_comm) != MPI_SUCCESS) { + HDprintf("MPI returned an error. Exiting\n"); + } + } + + /* Initialize testing framework */ + if (mpi_rank < 2) { + TestInit(argv[0], usage, parse_options); + + /* Parse command line arguments */ + TestParseCmdLine(argc, argv); + + AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); + + AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); + + AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE); + + AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE); + + AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE); + + AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE); + +#ifdef H5_HAVE_FILTER_DEFLATE + AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE); +#endif /* H5_HAVE_FILTER_DEFLATE */ + + /* Display testing information */ + if (MAINPROCESS) + TestInfo(argv[0]); + + /* setup file access property list */ + fapl = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL); + + /* Perform requested testing */ + PerformTests(); + } + + MPI_Barrier(MPI_COMM_WORLD); + + /* Restore the default bigio setting */ + H5_mpi_set_bigio_count(oldsize); + + express_test = GetTestExpress(); + if ((express_test == 0) && (mpi_rank < 2)) { + MpioTest2G(test_comm); + } + + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_rank == 0) + HDremove(FILENAME[0]); + + for (int i = 0; i < NFILENAME; i++) { + HDfree(filenames[i]); + filenames[i] = NULL; + } + + H5close(); + if (test_comm != MPI_COMM_WORLD) { + MPI_Comm_free(&test_comm); + } + MPI_Finalize(); + return 0; +} diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c new file mode 100644 index 0000000..1c66748 --- /dev/null +++ b/testpar/t_bigio.c @@ 
-0,0 +1,1926 @@
+
+#include "hdf5.h"
+#include "testphdf5.h"
+#include "H5Dprivate.h" /* For Chunk tests */
+
+/* FILENAME and filenames must have the same number of names */
+const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL};
+
+/* Constants definitions */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+
+/* Define some handy debugging shorthands, routines, ... */
+
+#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
+
+/* Constants definitions */
+#define RANK 2
+
+#define IN_ORDER     1
+#define OUT_OF_ORDER 2
+
+#define DATASET1 "DSET1"
+#define DATASET2 "DSET2"
+#define DATASET3 "DSET3"
+#define DATASET4 "DSET4"
+#define DXFER_COLLECTIVE_IO  0x1 /* Collective IO */
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
+#define DXFER_BIGCOUNT       (1 << 29)
+
+#define HYPER 1
+#define POINT 2
+#define ALL   3
+
+/* Dataset data type. Ints can easily be dumped in octal. */
+typedef hsize_t B_DATATYPE;
+
+int        facc_type       = FACC_MPIO; /* Test file access type */
+int        dxfer_coll_type = DXFER_COLLECTIVE_IO;
+size_t     bigcount        = (size_t)DXFER_BIGCOUNT;
+int        nerrors         = 0;
+static int mpi_size_g, mpi_rank_g;
+
+hsize_t space_dim1 = SPACE_DIM1 * 256; /* 4096 */
+hsize_t space_dim2 = SPACE_DIM2;
+
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+                           int file_selection, int mem_selection, int mode);
+
+/*
+ * Set up the coordinates for point selection.
+ */
+static void
+set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+           hsize_t coords[], int order)
+{
+    hsize_t i, j, k = 0, m, n, s1, s2;
+
+    if (OUT_OF_ORDER == order)
+        k = (num_points * RANK) - 1;
+    else if (IN_ORDER == order)
+        k = 0;
+
+    s1 = start[0];
+    s2 = start[1];
+
+    for (i = 0; i < count[0]; i++)
+        for (j = 0; j < count[1]; j++)
+            for (m = 0; m < block[0]; m++)
+                for (n = 0; n < block[1]; n++)
+                    if (OUT_OF_ORDER == order) {
+                        coords[k--] = s2 + (stride[1] * j) + n;
+                        coords[k--] = s1 + (stride[0] * i) + m;
+                    }
+                    else if (IN_ORDER == order) {
+                        coords[k++] = s1 + stride[0] * i + m;
+                        coords[k++] = s2 + stride[1] * j + n;
+                    }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+    B_DATATYPE *dataptr = dataset;
+    hsize_t     i, j;
+
+    /* put some trivial data in the data_array */
+    for (i = 0; i < block[0]; i++) {
+        for (j = 0; j < block[1]; j++) {
+            *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+            dataptr++;
+        }
+    }
+}
+
+/*
+ * Set up the coordinates for point selection.
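+ *
+ * The coords array filled here is laid out as num_points (row, col)
+ * pairs and feeds directly into an element selection (sketch):
+ *
+ *     point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ *     H5Sselect_elements(file_space, H5S_SELECT_SET, num_points, coords);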
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+          hsize_t coords[], int order)
+{
+    hsize_t i, j, k = 0, m, n, s1, s2;
+
+    HDcompile_assert(RANK == 2);
+
+    if (OUT_OF_ORDER == order)
+        k = (num_points * RANK) - 1;
+    else if (IN_ORDER == order)
+        k = 0;
+
+    s1 = start[0];
+    s2 = start[1];
+
+    for (i = 0; i < count[0]; i++)
+        for (j = 0; j < count[1]; j++)
+            for (m = 0; m < block[0]; m++)
+                for (n = 0; n < block[1]; n++)
+                    if (OUT_OF_ORDER == order) {
+                        coords[k--] = s2 + (stride[1] * j) + n;
+                        coords[k--] = s1 + (stride[0] * i) + m;
+                    }
+                    else if (IN_ORDER == order) {
+                        coords[k++] = s1 + stride[0] * i + m;
+                        coords[k++] = s2 + stride[1] * j + n;
+                    }
+
+    if (VERBOSE_MED) {
+        HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "total datapoints=%" PRIuHSIZE "\n",
+                 start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+                 block[0] * block[1] * count[0] * count[1]);
+        k = 0;
+        for (i = 0; i < num_points; i++) {
+            HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+            k += 2;
+        }
+    }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+    B_DATATYPE *dataptr = dataset;
+    hsize_t     i, j;
+
+    /* print the column heading */
+    HDprintf("%-8s", "Cols:");
+    for (j = 0; j < block[1]; j++) {
+        HDprintf("%3" PRIuHSIZE " ", start[1] + j);
+    }
+    HDprintf("\n");
+
+    /* print the slab data */
+    for (i = 0; i < block[0]; i++) {
+        HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
+        for (j = 0; j < block[1]; j++) {
+            HDprintf("%" PRIuHSIZE " ", *dataptr++);
+        }
+        HDprintf("\n");
+    }
+}
+
+/*
+ * Verify the content of the dataset against the original values.
+ */
+static int
+verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset,
+            B_DATATYPE *original)
+{
+    hsize_t i, j;
+    int     vrfyerrs;
+
+    /* print it if VERBOSE_MED */
+    if (VERBOSE_MED) {
+        HDprintf("verify_data dumping:::\n");
+        HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+                 "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+                 start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
+        HDprintf("original values:\n");
+        dataset_print(start, block, original);
+        HDprintf("compared values:\n");
+        dataset_print(start, block, dataset);
+    }
+
+    vrfyerrs = 0;
+    for (i = 0; i < block[0]; i++) {
+        for (j = 0; j < block[1]; j++) {
+            if (*dataset != *original) {
+                if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+                    HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+                             "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+                             "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+                             i, j, i + start[0], j + start[1], *(original), *(dataset));
+                }
+            }
+            /* advance for every element, not only on a mismatch */
+            dataset++;
+            original++;
+        }
+    }
+    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+        HDprintf("[more errors ...]\n");
+    if (vrfyerrs)
+        HDprintf("%d errors found in verify_data\n", vrfyerrs);
+    return (vrfyerrs);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+           int mode)
+{
+
+    switch (mode) {
+
+        case BYROW_CONT:
+            /* Each process takes a slab of rows.
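+            (rank r thus selects the contiguous rows
+            [r * space_dim1, (r + 1) * space_dim1) over the full width
+            space_dim2: count = {space_dim1, space_dim2},
+            block = stride = {1, 1}, start = {r * space_dim1, 0}.)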
+             */
+            block[0]  = 1;
+            block[1]  = 1;
+            stride[0] = 1;
+            stride[1] = 1;
+            count[0]  = space_dim1;
+            count[1]  = space_dim2;
+            start[0]  = (hsize_t)mpi_rank * count[0];
+            start[1]  = 0;
+
+            break;
+
+        case BYROW_DISCONT:
+            /* Each process takes several disjoint blocks. */
+            block[0]  = 1;
+            block[1]  = 1;
+            stride[0] = 3;
+            stride[1] = 3;
+            count[0]  = space_dim1 / (stride[0] * block[0]);
+            count[1]  = (space_dim2) / (stride[1] * block[1]);
+            start[0]  = space_dim1 * (hsize_t)mpi_rank;
+            start[1]  = 0;
+
+            break;
+
+        case BYROW_SELECTNONE:
+            /* Each process takes a slab of rows; the last process(es)
+               make no selection. */
+            block[0]  = 1;
+            block[1]  = 1;
+            stride[0] = 1;
+            stride[1] = 1;
+            count[0]  = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1);
+            count[1]  = space_dim2;
+            start[0]  = (hsize_t)mpi_rank * count[0];
+            start[1]  = 0;
+
+            break;
+
+        case BYROW_SELECTUNBALANCE:
+            /* The first two-thirds of the processes select from the top
+               of the domain; the rest select from the bottom half. */
+
+            block[0]  = 1;
+            count[0]  = 2;
+            stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1);
+            block[1]  = space_dim2;
+            count[1]  = 1;
+            start[1]  = 0;
+            stride[1] = 1;
+            if ((mpi_rank * 3) < (mpi_size * 2))
+                start[0] = (hsize_t)mpi_rank;
+            else
+                start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3);
+            break;
+
+        case BYROW_SELECTINCHUNK:
+            /* Each process will only select one chunk */
+
+            block[0]  = 1;
+            count[0]  = 1;
+            start[0]  = (hsize_t)mpi_rank * space_dim1;
+            stride[0] = 1;
+            block[1]  = space_dim2;
+            count[1]  = 1;
+            stride[1] = 1;
+            start[1]  = 0;
+
+            break;
+
+        default:
+            /* Unknown mode. Set it to cover the whole dataset. */
+            block[0]  = space_dim1 * (hsize_t)mpi_size;
+            block[1]  = space_dim2;
+            stride[0] = block[0];
+            stride[1] = block[1];
+            count[0]  = 1;
+            count[1]  = 1;
+            start[0]  = 0;
+            start[1]  = 0;
+
+            break;
+    }
+    if (VERBOSE_MED) {
+        HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+                 "datapoints=%lu\n",
+                 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+                 (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+                 (unsigned long)block[0], (unsigned long)block[1],
+                 (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+    }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
+               int mem_selection)
+{
+    DATATYPE *dataptr = dataset;
+    DATATYPE *tmptr;
+    hsize_t   i, j, k1, k2, k = 0;
+    /* put some trivial data in the data_array */
+    tmptr = dataptr;
+
+    /* assign the disjoint block (two-dimensional) data array value
+       through the pointer */
+
+    for (k1 = 0; k1 < count[0]; k1++) {
+        for (i = 0; i < block[0]; i++) {
+            for (k2 = 0; k2 < count[1]; k2++) {
+                for (j = 0; j < block[1]; j++) {
+
+                    if (ALL != mem_selection) {
+                        dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+                                           k2 * stride[1] + j);
+                    }
+                    else {
+                        dataptr = tmptr + k;
+                        k++;
+                    }
+
+                    *dataptr = (DATATYPE)(k1 + k2 + i + j);
+                }
+            }
+        }
+    }
+}
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+    DATATYPE *dataptr = dataset;
+    hsize_t   i, j;
+
+    /* print the column heading */
+    HDprintf("Print only the first block of the dataset\n");
+    HDprintf("%-8s", "Cols:");
+    for (j = 0; j < block[1]; j++) {
+        HDprintf("%3lu ", (unsigned long)(start[1] + j));
+    }
+    HDprintf("\n");
+
+    /* print the slab data */
+    for (i = 0; i < block[0]; i++) {
+        HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+        for (j = 0; j < block[1]; j++) {
+            HDprintf("%03d ", *dataptr++);
+        }
+        HDprintf("\n");
+    }
+}
+
+/*
+ * Verify that the dataset read back matches the original data.
+ */
+static int
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+               DATATYPE *original, int mem_selection)
+{
+    hsize_t   i, j, k1, k2, k = 0;
+    int       vrfyerrs;
+    DATATYPE *dataptr, *oriptr;
+
+    /* print it if VERBOSE_MED */
+    if (VERBOSE_MED) {
+        HDprintf("dataset_vrfy dumping:::\n");
+        HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+                 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+                 (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+                 (unsigned long)block[0], (unsigned long)block[1]);
+        HDprintf("original values:\n");
+        ccdataset_print(start, block, original);
+        HDprintf("compared values:\n");
+        ccdataset_print(start, block, dataset);
+    }
+
+    vrfyerrs = 0;
+
+    for (k1 = 0; k1 < count[0]; k1++) {
+        for (i = 0; i < block[0]; i++) {
+            for (k2 = 0; k2 < count[1]; k2++) {
+                for (j = 0; j < block[1]; j++) {
+                    if (ALL != mem_selection) {
+                        dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+                                             k2 * stride[1] + j);
+                        oriptr  = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+                                              k2 * stride[1] + j);
+                    }
+                    else {
+                        dataptr = dataset + k;
+                        oriptr  = original + k;
+                        k++;
+                    }
+                    if (*dataptr != *oriptr) {
+                        if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+                            HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+                                     (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
+                        }
+                    }
+                }
+            }
+        }
+    }
+    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+        HDprintf("[more errors ...]\n");
+    if (vrfyerrs)
+        HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+    return (vrfyerrs);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create several datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0 x dim1 and
+ * each process controls a hyperslab within.]
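+ *
+ * For DATASET1 below, the per-rank hyperslab works out to (a sketch,
+ * with N = mpi_size and dims = { bigcount, N }):
+ *
+ *     block  = { bigcount / N, N }
+ *     stride = block
+ *     count  = { 1, 1 }
+ *     start  = { mpi_rank * block[0], 0 }
+ *
+ * so each rank writes a contiguous band of bigcount / N full rows.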
+ */
+
+static void
+dataset_big_write(void)
+{
+
+    hid_t       xfer_plist; /* Dataset transfer properties list */
+    hid_t       sid;        /* Dataspace ID */
+    hid_t       file_dataspace; /* File dataspace ID */
+    hid_t       mem_dataspace;  /* memory dataspace ID */
+    hid_t       dataset;
+    hsize_t     dims[RANK];   /* dataset dim sizes */
+    hsize_t     start[RANK];  /* for hyperslab setting */
+    hsize_t     count[RANK], stride[RANK]; /* for hyperslab setting */
+    hsize_t     block[RANK];  /* for hyperslab setting */
+    hsize_t    *coords = NULL;
+    herr_t      ret; /* Generic return value */
+    hid_t       fid; /* HDF5 file ID */
+    hid_t       acc_tpl; /* File access templates */
+    size_t      num_points;
+    B_DATATYPE *wdata;
+
+    /* allocate memory for data buffer */
+    wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+    VRFY_G((wdata != NULL), "wdata malloc succeeded");
+
+    /* setup file access template */
+    acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+    VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
+    H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+    /* create the file collectively */
+    fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+    VRFY_G((fid >= 0), "H5Fcreate succeeded");
+
+    /* Release file-access template */
+    ret = H5Pclose(acc_tpl);
+    VRFY_G((ret >= 0), "");
+
+    /* Each process takes a slab of rows. */
+    if (mpi_rank_g == 0)
+        HDprintf("\nTesting Dataset1 write by ROW\n");
+    /* Create a large dataset */
+    dims[0] = bigcount;
+    dims[1] = (hsize_t)mpi_size_g;
+
+    sid = H5Screate_simple(RANK, dims, NULL);
+    VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+    dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+    H5Sclose(sid);
+
+    block[0]  = dims[0] / (hsize_t)mpi_size_g;
+    block[1]  = dims[1];
+    stride[0] = block[0];
+    stride[1] = block[1];
+    count[0]  = 1;
+    count[1]  = 1;
+    start[0]  = (hsize_t)mpi_rank_g * block[0];
+    start[1]  = 0;
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY_G((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
+    VRFY_G((mem_dataspace >= 0), "");
+
+    /* fill the local slab with some trivial data */
+    fill_datasets(start, block, wdata);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, wdata);
+    }
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+    VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    /* Each process takes a slab of cols. */
+    if (mpi_rank_g == 0)
+        HDprintf("\nTesting Dataset2 write by COL\n");
+    /* Create a large dataset */
+    dims[0] = bigcount;
+    dims[1] = (hsize_t)mpi_size_g;
+
+    sid = H5Screate_simple(RANK, dims, NULL);
+    VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+    dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+    H5Sclose(sid);
+
+    block[0]  = dims[0];
+    block[1]  = dims[1] / (hsize_t)mpi_size_g;
+    stride[0] = block[0];
+    stride[1] = block[1];
+    count[0]  = 1;
+    count[1]  = 1;
+    start[0]  = 0;
+    start[1]  = (hsize_t)mpi_rank_g * block[1];
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY_G((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
+    VRFY_G((mem_dataspace >= 0), "");
+
+    /* fill the local slab with some trivial data */
+    fill_datasets(start, block, wdata);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, wdata);
+    }
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+    VRFY_G((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    /* ALL selection */
+    if (mpi_rank_g == 0)
+        HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
+    /* Create a large dataset */
+    dims[0] = bigcount;
+    dims[1] = 1;
+
+    sid = H5Screate_simple(RANK, dims, NULL);
+    VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+    dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+    H5Sclose(sid);
+
+    /* create a file dataspace independently */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+    if (mpi_rank_g == 0) {
+        ret = H5Sselect_all(file_dataspace);
+        VRFY_G((ret >= 0), "H5Sselect_all succeeded");
+    }
+    else {
+        ret = H5Sselect_none(file_dataspace);
+        VRFY_G((ret >= 0), "H5Sselect_none succeeded");
+    }
+
+    /* create a memory dataspace independently */
+    mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+    VRFY_G((mem_dataspace >= 0), "");
+    if (mpi_rank_g != 0) {
+        ret = H5Sselect_none(mem_dataspace);
+        VRFY_G((ret >= 0), "H5Sselect_none succeeded");
+    }
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* fill the local slab with some trivial data */
+    fill_datasets(start, dims, wdata);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+    }
+
+    ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+    VRFY_G((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    /* Point selection */
+    if (mpi_rank_g == 0)
+        HDprintf("\nTesting Dataset4 write point selection\n");
+    /* Create a large dataset */
+    dims[0] = bigcount;
+    dims[1] = (hsize_t)(mpi_size_g * 4);
+
+    sid = H5Screate_simple(RANK, dims, NULL);
+    VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+    dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+    H5Sclose(sid);
+
+    block[0]  = dims[0] / 2;
+    block[1]  = 2;
+    stride[0] = dims[0] / 2;
+    stride[1] = 2;
+    count[0]  = 1;
+    count[1]  = 1;
+    start[0]  = 0;
+    start[1]  = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+
+    num_points = bigcount;
+
+    coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+    VRFY_G((coords != NULL), "coords malloc succeeded");
+
+    set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+    /* create a file dataspace */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+    VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+    if (coords)
+        HDfree(coords);
+
+    fill_datasets(start, block, wdata);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, wdata);
+    }
+
+    /* create a memory dataspace */
+    /* Warning: H5Screate_simple requires an array of hsize_t elements
+     * even if we only pass a single value. Attempting anything else
+     * appears to cause problems with 32 bit compilers.
+     */
+    mem_dataspace = H5Screate_simple(1, dims, NULL);
+    VRFY_G((mem_dataspace >= 0), "");
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+    VRFY_G((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    HDfree(wdata);
+    H5Fclose(fid);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read several datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0 x dim1 and
+ * each process controls a hyperslab within.]
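+ *
+ * Each dataset is read back and compared element by element against a
+ * freshly generated copy of the expected data; the pattern used below
+ * is (a sketch):
+ *
+ *     fill_datasets(start, block, wdata);    <-- regenerate expected data
+ *     H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace,
+ *             file_dataspace, xfer_plist, rdata);
+ *     if (verify_data(start, count, stride, block, rdata, wdata))
+ *         exit(1);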
+ */ + +static void +dataset_big_read(void) +{ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset; + B_DATATYPE *rdata = NULL; /* data buffer */ + B_DATATYPE *wdata = NULL; /* expected data buffer */ + hsize_t dims[RANK]; /* dataset dim sizes */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ + size_t num_points; + hsize_t *coords = NULL; + herr_t ret; /* Generic return value */ + + /* allocate memory for data buffer */ + rdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE)); + VRFY_G((rdata != NULL), "rdata malloc succeeded"); + wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE)); + VRFY_G((wdata != NULL), "wdata malloc succeeded"); + + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + + /* setup file access template */ + acc_tpl = H5Pcreate(H5P_FILE_ACCESS); + VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS"); + H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL); + + /* open the file collectively */ + fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl); + VRFY_G((fid >= 0), "H5Fopen succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY_G((ret >= 0), ""); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset1 by COL\n"); + + dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + /* Each process takes a slabs of cols. */ + block[0] = dims[0]; + block[1] = dims[1] / (hsize_t)mpi_size_g; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank_g * block[1]; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset1 succeeded"); + + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset2 by ROW\n"); + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = (hsize_t)mpi_size_g; + /* Each process takes a slabs of rows. */ + block[0] = dims[0] / (hsize_t)mpi_size_g; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank_g * block[0]; + start[1] = 0; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, block, NULL); + VRFY_G((mem_dataspace >= 0), ""); + + /* fill dataset with test data */ + fill_datasets(start, block, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset2 succeeded"); + + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + + /* release all temporary handles. 
*/ + H5Sclose(file_dataspace); + H5Sclose(mem_dataspace); + H5Pclose(xfer_plist); + ret = H5Dclose(dataset); + VRFY_G((ret >= 0), "H5Dclose1 succeeded"); + + if (mpi_rank_g == 0) + HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n"); + HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE)); + dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT); + VRFY_G((dataset >= 0), "H5Dopen2 succeeded"); + + dims[0] = bigcount; + dims[1] = 1; + + /* create a file dataspace independently */ + file_dataspace = H5Dget_space(dataset); + VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded"); + if (mpi_rank_g == 0) { + ret = H5Sselect_all(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_all succeeded"); + } + else { + ret = H5Sselect_none(file_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(RANK, dims, NULL); + VRFY_G((mem_dataspace >= 0), ""); + if (mpi_rank_g != 0) { + ret = H5Sselect_none(mem_dataspace); + VRFY_G((ret >= 0), "H5Sset_none succeeded"); + } + + /* fill dataset with test data */ + fill_datasets(start, dims, wdata); + MESG("data_array initialized"); + if (VERBOSE_MED) { + MESG("data_array created"); + } + + /* set up the collective transfer properties list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((ret >= 0), "H5Pcreate xfer succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((ret >= 0), "set independent IO collectively succeeded"); + } + + /* read data collectively */ + ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata); + VRFY_G((ret >= 0), "H5Dread dataset3 succeeded"); + + if (mpi_rank_g == 0) { + /* verify the read data with original expected data */ + ret = verify_data(start, count, stride, block, rdata, wdata); + if (ret) { + HDfprintf(stderr, "verify failed\n"); + exit(1); + } + } + + /* release all temporary handles. 
*/
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    if (mpi_rank_g == 0)
+        HDprintf("\nRead Testing Dataset4 with Point selection\n");
+    dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+    dims[0] = bigcount;
+    dims[1] = (hsize_t)(mpi_size_g * 4);
+
+    block[0]  = dims[0] / 2;
+    block[1]  = 2;
+    stride[0] = dims[0] / 2;
+    stride[1] = 2;
+    count[0]  = 1;
+    count[1]  = 1;
+    start[0]  = 0;
+    start[1]  = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+
+    fill_datasets(start, block, wdata);
+    MESG("data_array initialized");
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, wdata);
+    }
+
+    num_points = bigcount;
+
+    coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+    VRFY_G((coords != NULL), "coords malloc succeeded");
+
+    set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+    /* create a file dataspace */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+    VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+    if (coords)
+        HDfree(coords);
+
+    /* create a memory dataspace */
+    /* Warning: H5Screate_simple requires an array of hsize_t elements
+     * even if we only pass a single value. Attempting anything else
+     * appears to cause problems with 32 bit compilers.
+     */
+    mem_dataspace = H5Screate_simple(1, dims, NULL);
+    VRFY_G((mem_dataspace >= 0), "");
+
+    /* set up the collective transfer properties list */
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+    VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+    VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+    }
+
+    /* read data collectively */
+    ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+    VRFY_G((ret >= 0), "H5Dread dataset4 succeeded");
+
+    ret = verify_data(start, count, stride, block, rdata, wdata);
+    if (ret) {
+        HDfprintf(stderr, "verify failed\n");
+        exit(1);
+    }
+
+    /* release all temporary handles. */
+    H5Sclose(file_dataspace);
+    H5Sclose(mem_dataspace);
+    H5Pclose(xfer_plist);
+    ret = H5Dclose(dataset);
+    VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    HDfree(wdata);
+    HDfree(rdata);
+
+    wdata = NULL;
+    rdata = NULL;
+    /* We never wrote Dataset5 in the write section, so we can't
+     * expect to read it...
+     */
+    file_dataspace = -1;
+    mem_dataspace  = -1;
+    xfer_plist     = -1;
+    dataset        = -1;
+
+    /* release all temporary handles. */
+    if (file_dataspace != -1)
+        H5Sclose(file_dataspace);
+    if (mem_dataspace != -1)
+        H5Sclose(mem_dataspace);
+    if (xfer_plist != -1)
+        H5Pclose(xfer_plist);
+    if (dataset != -1) {
+        ret = H5Dclose(dataset);
+        VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+    }
+    H5Fclose(fid);
+
+    /* release data buffers */
+    if (rdata)
+        HDfree(rdata);
+    if (wdata)
+        HDfree(wdata);
+
+} /* dataset_big_read */
+
+static void
+single_rank_independent_io(void)
+{
+    if (mpi_rank_g == 0)
+        HDprintf("\nSingle Rank Independent I/O\n");
+
+    if (MAIN_PROCESS) {
+        hsize_t  dims[1];
+        hid_t    file_id   = -1;
+        hid_t    fapl_id   = -1;
+        hid_t    dset_id   = -1;
+        hid_t    fspace_id = -1;
+        herr_t   ret;
+        int     *data = NULL;
+        uint64_t i;
+
+        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
+
+        H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+        file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+        VRFY_G((file_id >= 0), "H5Fcreate succeeded");
+
+        /*
+         * Calculate the number of elements needed to exceed
+         * MPI's INT_MAX limitation
+         */
+        dims[0] = (INT_MAX / sizeof(int)) + 10;
+
+        fspace_id = H5Screate_simple(1, dims, NULL);
+        VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded");
+
+        /*
+         * Create and write to a >2GB dataset from a single rank.
+         */
+        dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+                             H5P_DEFAULT);
+        VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
+
+        data = malloc(dims[0] * sizeof(int));
+        VRFY_G((data != NULL), "data malloc succeeded");
+
+        /* Initialize data */
+        for (i = 0; i < dims[0]; i++)
+            data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
+
+        /* Write data */
+        ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+        VRFY_G((ret >= 0), "H5Dwrite succeeded");
+
+        /* Wipe buffer */
+        HDmemset(data, 0, dims[0] * sizeof(int));
+
+        /* Read data back */
+        ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+        VRFY_G((ret >= 0), "H5Dread succeeded");
+
+        /* Verify data */
+        for (i = 0; i < dims[0]; i++)
+            if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
+                HDfprintf(stderr, "verify failed\n");
+                exit(1);
+            }
+
+        free(data);
+        H5Sclose(fspace_id);
+        H5Pclose(fapl_id);
+        H5Dclose(dset_id);
+        H5Fclose(file_id);
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+    hid_t  ret_pl = -1;
+    herr_t ret;      /* generic return value */
+    int    mpi_rank; /* mpi variables */
+
+    /* need the rank for error checking macros */
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+    VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+    if (l_facc_type == FACC_DEFAULT)
+        return (ret_pl);
+
+    if (l_facc_type == FACC_MPIO) {
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+        VRFY_G((ret >= 0), "");
+        ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+        VRFY_G((ret >= 0), "");
+        ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+        VRFY_G((ret >= 0), "");
+        return (ret_pl);
+    }
+
+    if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+        hid_t mpio_pl;
+
+        mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY_G((mpio_pl >= 0), "");
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+        VRFY_G((ret >= 0), "");
+
+        /* setup file access template */
+        ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+        VRFY_G((ret_pl >= 0), "");
+        /* set Parallel access with communicator */
+        ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl,
".raw", mpio_pl); + VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); + } + + /* unknown file access types */ + return (ret_pl); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk1 + * + * Purpose: Wrapper to test the collective chunk IO for regular JOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: One big singular selection inside one chunk + * Two dimensions, + * + * dim1 = space_dim1(5760)*mpi_size + * dim2 = space_dim2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 1 for all dimensions + * count0 = space_dim1(5760) + * count1 = space_dim2(3) + * start0 = mpi_rank*space_dim1 + * start1 = 0 + * ------------------------------------------------------------------------ + */ + +void +coll_chunk1(void) +{ + const char *filename = FILENAME[0]; + if (mpi_rank_g == 0) + HDprintf("\nCollective chunk I/O Test #1\n"); + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); +} + +/*------------------------------------------------------------------------- + * Function: coll_chunk2 + * + * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT + selection with a single chunk + * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ + +/* ------------------------------------------------------------------------ + * Descriptions for the selection: many disjoint selections inside one chunk + * Two dimensions, + * + * dim1 = space_dim1*mpi_size(5760) + * dim2 = space_dim2(3) + * chunk_dim1 = dim1 + * chunk_dim2 = dim2 + * block = 1 for all dimensions + * stride = 3 for all dimensions + * count0 = space_dim1/stride0(5760/3) + * count1 = space_dim2/stride(3/3 = 1) + * start0 = mpi_rank*space_dim1 + * start1 = 0 + * + * ------------------------------------------------------------------------ + */ +void +coll_chunk2(void) +{ + const char *filename = FILENAME[0]; + if (mpi_rank_g == 0) + HDprintf("\nCollective chunk I/O Test #2\n"); + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER); + + coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 1, 
BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+    coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function:    coll_chunk3
+ *
+ * Purpose:     Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return:      Success:    0
+ *
+ *              Failure:    -1
+ *
+ * Programmer:  Unknown
+ *              July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, number of chunks = 2 * mpi_size
+ *
+ * dim1       = space_dim1*mpi_size
+ * dim2       = space_dim2(3)
+ * chunk_dim1 = space_dim1
+ * chunk_dim2 = dim2/2
+ * block      = 1 for all dimensions
+ * stride     = 1 for all dimensions
+ * count0     = space_dim1
+ * count1     = space_dim2(3)
+ * start0     = mpi_rank*space_dim1
+ * start1     = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk3(void)
+{
+    const char *filename = FILENAME[0];
+    if (mpi_rank_g == 0)
+        HDprintf("\nCollective chunk I/O Test #3\n");
+
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+    coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+//-------------------------------------------------------------------------
+// Borrowed/Modified (slightly) from t_coll_chunk.c
+/*-------------------------------------------------------------------------
+ * Function:    coll_chunktest
+ *
+ * Purpose:     The real testing routine for regular selection of collective
+                chunking storage. Tests both the write and the read paths;
+                if anything fails, it may be either one, since there is no
+                separate test for read versus write.
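+ *
+ *              Chunk layout used (a summary of the code below):
+ *
+ *                  chunk_dims[0] = dims[0] / chunk_factor;
+ *                  chunk_dims[1] = (chunk_factor == 1) ? space_dim2
+ *                                                      : space_dim2 / 2;
+ *
+ *              so chunk_factor == 1 keeps the whole dataset in a single
+ *              chunk, while chunk_factor == mpi_size splits both
+ *              dimensions, giving at least 2 * mpi_size chunks.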
+ * + * Return: Success: 0 + * + * Failure: -1 + * + * Programmer: Unknown + * July 12th, 2004 + * + *------------------------------------------------------------------------- + */ + +static void +coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, + int mem_selection, int mode) +{ + hid_t file, dataset, file_dataspace, mem_dataspace; + hid_t acc_plist, xfer_plist, crp_plist; + + hsize_t dims[RANK], chunk_dims[RANK]; + int *data_array1 = NULL; + int *data_origin1 = NULL; + + hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + unsigned prop_value; +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + herr_t status; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + + /* Create the data space */ + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY_G((acc_plist >= 0), ""); + + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); + VRFY_G((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY_G((status >= 0), ""); + + /* setup dimensionality object */ + dims[0] = space_dim1 * (hsize_t)mpi_size_g; + dims[1] = space_dim2; + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor); + + /* set up the coords array selection */ + num_points = block[0] * block[1] * count[0] * count[1]; + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY_G((coords != NULL), "coords malloc succeeded"); + point_set(start, count, stride, block, num_points, coords, mode); + + /* Warning: H5Screate_simple requires an array of hsize_t elements + * even if we only pass only a single value. Attempting anything else + * appears to cause problems with 32 bit compilers. + */ + file_dataspace = H5Screate_simple(2, dims, NULL); + VRFY_G((file_dataspace >= 0), "file dataspace created succeeded"); + + if (ALL != mem_selection) { + mem_dataspace = H5Screate_simple(2, dims, NULL); + VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded"); + } + else { + /* Putting the warning about H5Screate_simple (above) into practice... */ + hsize_t dsdims[1] = {num_points}; + mem_dataspace = H5Screate_simple(1, dsdims, NULL); + VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + crp_plist = H5Pcreate(H5P_DATASET_CREATE); + VRFY_G((crp_plist >= 0), ""); + + /* Set up chunk information. */ + chunk_dims[0] = dims[0] / (hsize_t)chunk_factor; + + /* to decrease the testing time, maintain bigger chunk size */ + (chunk_factor == 1) ? 
(chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2); + status = H5Pset_chunk(crp_plist, 2, chunk_dims); + VRFY_G((status >= 0), "chunk creation property list succeeded"); + + dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT, + crp_plist, H5P_DEFAULT); + VRFY_G((dataset >= 0), "dataset created succeeded"); + + status = H5Pclose(crp_plist); + VRFY_G((status >= 0), ""); + + /*put some trivial data in the data array */ + ccdataset_fill(start, stride, count, block, data_array1, mem_selection); + + MESG("data_array initialized"); + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* set up the collective transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((status >= 0), "set independent IO collectively succeeded"); + } + + switch (api_option) { + case API_LINK_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO); + VRFY_G((status >= 0), "collective chunk optimization succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY_G((status >= 0), "collective chunk optimization succeeded "); + break; + + case API_LINK_TRUE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2); + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6); + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); + VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + case API_MULTI_IND: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY_G((status >= 0), 
"collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); + VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + default:; + } + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + if (facc_type == FACC_MPIO) { + switch (api_option) { + case API_LINK_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_HARD: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_TRUE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_LINK_FALSE: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_COLL: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, + H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + case API_MULTI_IND: + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY_G((status >= 0), "testing property list inserted succeeded"); + break; + + default:; + } + } +#endif + + /* write data collectively */ + status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY_G((status >= 0), "dataset write succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + if (facc_type == FACC_MPIO) { + switch (api_option) { + case API_LINK_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); + break; + + case API_LINK_TRUE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); + VRFY_G((status >= 0), "testing property list get succeeded"); + VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pget(xfer_plist, 
H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+                VRFY_G((status >= 0), "testing property list get succeeded");
+                VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+                break;
+
+            case API_MULTI_IND:
+                status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+                VRFY_G((status >= 0), "testing property list get succeeded");
+                VRFY_G((prop_value == 0),
+                       "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+                break;
+
+            default:;
+        }
+    }
+#endif
+
+    status = H5Dclose(dataset);
+    VRFY_G((status >= 0), "");
+
+    status = H5Pclose(xfer_plist);
+    VRFY_G((status >= 0), "property list closed");
+
+    status = H5Sclose(file_dataspace);
+    VRFY_G((status >= 0), "");
+
+    status = H5Sclose(mem_dataspace);
+    VRFY_G((status >= 0), "");
+
+    status = H5Fclose(file);
+    VRFY_G((status >= 0), "");
+
+    if (data_array1)
+        HDfree(data_array1);
+
+    /* Use collective read to verify the correctness of collective write. */
+
+    /* allocate memory for data buffer */
+    data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+    VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+    /* allocate memory for data buffer */
+    data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+    VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+    acc_plist = create_faccess_plist(comm, info, facc_type);
+    VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded");
+
+    file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist);
+    VRFY_G((file >= 0), "H5Fopen succeeded");
+
+    status = H5Pclose(acc_plist);
+    VRFY_G((status >= 0), "");
+
+    /* open the collective dataset */
+    dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+    VRFY_G((dataset >= 0), "");
+
+    /* set up dimensions of the slab this process accesses */
+    ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+    /* obtain the file and mem dataspace */
+    file_dataspace = H5Dget_space(dataset);
+    VRFY_G((file_dataspace >= 0), "");
+
+    if (ALL != mem_selection) {
+        mem_dataspace = H5Dget_space(dataset);
+        VRFY_G((mem_dataspace >= 0), "");
+    }
+    else {
+        /* Warning: H5Screate_simple requires an array of hsize_t elements
+         * even if we only pass a single value. Attempting anything else
+         * appears to cause problems with 32 bit compilers.
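+         *
+         * In other words, prefer what the code below does:
+         *
+         *     hsize_t dsdims[1] = {num_points};
+         *     mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+         *
+         * over passing the address of a lone hsize_t variable.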
+ */ + hsize_t dsdims[1] = {num_points}; + mem_dataspace = H5Screate_simple(1, dsdims, NULL); + VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY_G((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY_G((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY_G((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY_G((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* fill dataset with test data */ + ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY_G((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY_G((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY_G((status >= 0), "set independent IO collectively succeeded"); + } + + status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY_G((status >= 0), "dataset read succeeded"); + + /* verify the read data with original expected data */ + status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); + if (status) + nerrors++; + + status = H5Pclose(xfer_plist); + VRFY_G((status >= 0), "property list closed"); + + /* close dataset collectively */ + status = H5Dclose(dataset); + VRFY_G((status >= 0), "H5Dclose"); + + /* release all IDs created */ + status = H5Sclose(file_dataspace); + VRFY_G((status >= 0), "H5Sclose"); + + status = H5Sclose(mem_dataspace); + VRFY_G((status >= 0), "H5Sclose"); + + /* close the file collectively */ + status = H5Fclose(file); + VRFY_G((status >= 0), "H5Fclose"); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} + +int +main(int argc, char **argv) +{ + hsize_t newsize = 1048576; + /* Set the bigio processing limit to be 'newsize' bytes */ + hsize_t oldsize = H5_mpi_set_bigio_count(newsize); + + /* Having set the bigio handling to a size that is manageable, + * we'll set our 'bigcount' variable to be 2X that limit so + * that we try to ensure that our bigio handling is actually + * envoked and tested. 
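+     *
+     * Concretely, with the values above: newsize = 1048576, so
+     * bigcount = 2 * 1048576 = 2097152 elements per I/O buffer,
+     * twice the configured big-I/O limit, so the big-I/O code
+     * path should be exercised.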
+ */ + if (newsize != oldsize) + bigcount = newsize * 2; + + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g); + + /* Attempt to turn off atexit post processing so that in case errors + * happen during the test and the process is aborted, it will not get + * hang in the atexit post processing in which it may try to make MPI + * calls. By then, MPI calls may not work. + */ + if (H5dont_atexit() < 0) + HDprintf("Failed to turn off atexit processing. Continue.\n"); + + /* set alarm. */ + TestAlarmOn(); + + dataset_big_write(); + MPI_Barrier(MPI_COMM_WORLD); + + dataset_big_read(); + MPI_Barrier(MPI_COMM_WORLD); + + coll_chunk1(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk2(); + MPI_Barrier(MPI_COMM_WORLD); + coll_chunk3(); + MPI_Barrier(MPI_COMM_WORLD); + + /* + * Reset big count for the next test, as it + * doesn't use the functionality in the same + * way as the previous tests. + */ + H5_mpi_set_bigio_count(oldsize); + single_rank_independent_io(); + + /* turn off alarm */ + TestAlarmOff(); + + if (mpi_rank_g == 0) { + hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS); + + H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL); + + H5E_BEGIN_TRY + { + H5Fdelete(FILENAME[0], fapl_id); + H5Fdelete(FILENAME[1], fapl_id); + } + H5E_END_TRY; + + H5Pclose(fapl_id); + } + + /* close HDF5 library */ + H5close(); + + MPI_Finalize(); + + return 0; +} diff --git a/testpar/t_cache.c b/testpar/t_cache.c index f526a8b..ae47a6f 100644 --- a/testpar/t_cache.c +++ b/testpar/t_cache.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -18,182 +15,183 @@ * */ -#include "h5test.h" #include "testpar.h" -#include "H5Iprivate.h" -#include "H5ACprivate.h" - -#define H5C_PACKAGE /*suppress error about including H5Cpkg */ - -#include "H5Cpkg.h" -#define H5AC_PACKAGE /*suppress error about including H5ACpkg */ +#define H5AC_FRIEND /*suppress error about including H5ACpkg */ +#define H5C_FRIEND /*suppress error about including H5Cpkg */ +#define H5F_FRIEND /*suppress error about including H5Fpkg */ #include "H5ACpkg.h" - -#define H5F_PACKAGE /*suppress error about including H5Fpkg */ - +#include "H5Cpkg.h" +#include "H5CXprivate.h" #include "H5Fpkg.h" +#include "H5Iprivate.h" +#include "H5MFprivate.h" +#include "H5private.h" +#define BASE_ADDR (haddr_t)1024 -int nerrors = 0; -int failures = 0; -hbool_t verbose = TRUE; /* used to control error messages */ -#if 0 -/* So far we haven't needed this, but that may change. - * Keep it around for now - */ -hid_t noblock_dxpl_id=(-1); -#endif +int nerrors = 0; +int failures = 0; +hbool_t verbose = TRUE; /* used to control error messages */ #define NFILENAME 2 -#define PARATESTFILE filenames[0] -const char *FILENAME[NFILENAME]={"CacheTestDummy", NULL}; +const char *FILENAME[NFILENAME] = {"CacheTestDummy", NULL}; #ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ - - -int world_mpi_size = -1; -int world_mpi_rank = -1; -int world_server_mpi_rank = -1; -MPI_Comm world_mpi_comm = MPI_COMM_NULL; -int file_mpi_size = -1; -int file_mpi_rank = -1; -MPI_Comm file_mpi_comm = MPI_COMM_NULL; - +#define PATH_MAX 512 +#endif /* !PATH_MAX */ +char *filenames[NFILENAME]; +hid_t fapl; /* file access property list */ +haddr_t max_addr = 0; /* used to store the end of + * the address space used by + * the data array (see below). + */ +hbool_t callbacks_verbose = FALSE; /* flag used to control whether + * the callback functions are in + * verbose mode. + */ + +int world_mpi_size = -1; +int world_mpi_rank = -1; +int world_server_mpi_rank = -1; +MPI_Comm world_mpi_comm = MPI_COMM_NULL; +int file_mpi_size = -1; +int file_mpi_rank = -1; +MPI_Comm file_mpi_comm = MPI_COMM_NULL; /* the following globals are used to maintain rudementary statistics * to check the validity of the statistics maintained by H5C.c */ -long datum_clears = 0; -long datum_pinned_clears = 0; -long datum_destroys = 0; -long datum_flushes = 0; -long datum_pinned_flushes = 0; -long datum_loads = 0; -long global_pins = 0; -long global_dirty_pins = 0; -long local_pins = 0; - +long datum_clears = 0; +long datum_pinned_clears = 0; +long datum_destroys = 0; +long datum_flushes = 0; +long datum_pinned_flushes = 0; +long datum_loads = 0; +long global_pins = 0; +long global_dirty_pins = 0; +long local_pins = 0; /* the following fields are used by the server process only */ -int total_reads = 0; -int total_writes = 0; - +int total_reads = 0; +int total_writes = 0; /***************************************************************************** * struct datum * - * Instances of struct datum are used to store information on entries - * that may be loaded into the cache. The individual fields are - * discussed below: + * Instances of struct datum are used to store information on entries + * that may be loaded into the cache. The individual fields are + * discussed below: + * + * header: Instance of H5C_cache_entry_t used by the for its data. 
+ * This field is only used on the file processes, not on the + * server process. * - * header: Instance of H5C_cache_entry_t used by the for its data. - * This field is only used on the file processes, not on the - * server process. + * This field MUST be the first entry in this structure. * - * This field MUST be the first entry in this structure. + * base_addr: Base address of the entry. * - * base_addr: Base address of the entry. + * len: Length of the entry. * - * len: Length of the entry. + * local_len: Length of the entry according to the cache. This + * value must be positive, and may not be larger than len. * - * local_len: Length of the entry according to the cache. This - * value must be positive, and may not be larger than len. + * The field exists to allow us to change the sizes of entries + * in the cache without upsetting the server. This value + * is only used locally, and is never sent to the server. * - * The field exists to allow us change the sizes of entries - * in the cache without upsetting the server. This value - * is only used locally, and is never sent to the server. + * ver: Version number of the entry. This number is initialized + * to zero, and incremented each time the entry is modified. * - * ver: Version number of the entry. This number is initialize - * to zero, and incremented each time the entry is modified. + * dirty: Boolean flag indicating whether the entry is dirty. * - * dirty: Boolean flag indicating whether the entry is dirty. + * For current purposes, an entry is clean until it is + * modified, and dirty until written to the server (cache + * on process 0) or until it is marked clean (all other + * caches). * - * For current purposes, an entry is clean until it is - * modified, and dirty until written to the server (cache - * on process 0) or until it is marked clean (all other - * caches). + * valid: Boolean flag indicating whether the entry contains + * valid data. Attempts to read an entry whose valid + * flag is not set should trigger an error. * - * valid: Boolean flag indicating whether the entry contains - * valid data. Attempts to read an entry whose valid - * flag is not set should trigger an error. + * locked: Boolean flag that is set to true iff the entry is in + * the cache and locked. * - * locked: Boolean flag that is set to true iff the entry is in - * the cache and locked. + * global_pinned: Boolean flag that is set to true iff the entry has + * been pinned collectively in all caches. Since writes must + * be collective across all processes, only entries pinned + * in this fashion may be marked dirty. * - * global_pinned: Boolean flag that is set to true iff the entry has - * been pinned collectively in all caches. Since writes must - * be collective across all processes, only entries pinned - * in this fashion may be marked dirty. + * local_pinned: Boolean flag that is set to true iff the entry + * has been pinned in the local cache, but probably not all + * caches. Such pins will typically not be consistent across + * processes, and thus cannot be marked as dirty unless they + * happen to overlap some collective operation. * - * local_pinned: Boolean flag that is set to true iff the entry - * has been pinned in the local cache, but probably not all - * caches. Such pins will typically not be consistant across - * processes, and thus cannot be marked as dirty unless they - * happen to overlap some collective operation.
+ * cleared: Boolean flag that is set to true whenever the entry is + * dirty, and is cleared via a call to datum_notify with the + * "entry cleaned" action. * - * cleared: Boolean flag that is set to true whenever the entry is - * dirty, and is cleared via a call to clear_datum(). + * flushed: Boolean flag that is set to true whenever the entry is + * dirty, and is flushed by the metadata cache. * - * flushed: Boolean flag that is set to true whenever the entry is - * dirty, and is flushed via a call to flush_datum(). + * reads: Integer field used to maintain a count of the number of + * times this entry has been read from the server since + * the last time the read and write counts were reset. * - * reads: Integer field used to maintain a count of the number of - * times this entry has been read from the server since - * the last time the read and write counts were reset. + * writes: Integer field used to maintain a count of the number of + * times this entry has been written to the server since + * the last time the read and write counts were reset. * - * writes: Integer field used to maintain a count of the number of - * times this entry has been written to the server since - * the last time the read and write counts were reset. + * index: Index of this instance of datum in the data_index[] array + * discussed below. * - * index: Index of this instance of datum in the data_index[] array - * discussed below. + * aux_ptr: Pointer to the instance of H5AC_aux_t associated with the + * instance of the metadata cache within which this entry + * resides. This field was added to allow us to pass this + * value to the notify callback from the serialize callback. + * It should be NULL when not in use. * *****************************************************************************/ -struct datum -{ - H5C_cache_entry_t header; - haddr_t base_addr; - size_t len; - size_t local_len; - int ver; - hbool_t dirty; - hbool_t valid; - hbool_t locked; - hbool_t global_pinned; - hbool_t local_pinned; - hbool_t cleared; - hbool_t flushed; - int reads; - int writes; - int index; +struct datum { + H5C_cache_entry_t header; + haddr_t base_addr; + size_t len; + size_t local_len; + int ver; + hbool_t dirty; + hbool_t valid; + hbool_t locked; + hbool_t global_pinned; + hbool_t local_pinned; + hbool_t cleared; + hbool_t flushed; + int reads; + int writes; + int index; + struct H5AC_aux_t *aux_ptr; }; /***************************************************************************** * data array * - * The data array is an array of instances of datum of size - * NUM_DATA_ENTRIES that is used to track the particulars of all - * the entries that may be loaded into the cache. + * The data array is an array of instances of datum of size + * NUM_DATA_ENTRIES that is used to track the particulars of all + * the entries that may be loaded into the cache. * - * It exists on all processes, although the master copy is maintained - * by the server process. If the cache is performing correctly, all - * versions should be effectively identical. By that I mean that - * the data received from the server should always match that in - * the local version of the data array. + * It exists on all processes, although the master copy is maintained + * by the server process. If the cache is performing correctly, all + * versions should be effectively identical. By that I mean that + * the data received from the server should always match that in + * the local version of the data array. 
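The ver and dirty fields documented above travel together: every modification bumps the version, and the entry stays dirty until the server has seen the new version. A sketch of that rule, using a hypothetical helper that is not part of the test:

/* Hypothetical helper: how ver and dirty are meant to move in lockstep */
static void
modify_datum(int idx)
{
    data[idx].ver++;        /* a new version of the (simulated) contents */
    data[idx].dirty = TRUE; /* dirty until this version reaches the server */
}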
* *****************************************************************************/ -#define NUM_DATA_ENTRIES 100000 - -struct datum data[NUM_DATA_ENTRIES]; +#define NUM_DATA_ENTRIES 100000 +struct datum *data = NULL; /* Many tests use the size of data array as the size of test loops. * On some machines, this results in unacceptably long test runs. @@ -206,130 +204,123 @@ struct datum data[NUM_DATA_ENTRIES]; * even divisor of NUM_DATA_ENTRIES. So far, all tests have been with * powers of 10 that meet these criteria. * - * Further, this value must be consistant across all processes. + * Further, this value must be consistent across all processes. */ -#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES -#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10) -/* Use a smaller test size to avoid creating huge MPE logfiles. */ -#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100) +#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES +#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10) int virt_num_data_entries = NUM_DATA_ENTRIES; - /***************************************************************************** * data_index array * - * The data_index array is an array of integer used to maintain a list - * of instances of datum in the data array in increasing base_addr order. + * The data_index array is an array of integer used to maintain a list + * of instances of datum in the data array in increasing base_addr order. * - * This array is necessary, as move operations can swap the values - * of the base_addr fields of two instances of datum. Without this - * array, we would no longer be able to use a binary search on a sorted - * list to find the indexes of instances of datum given the values of - * their base_addr fields. + * This array is necessary, as move operations can swap the values + * of the base_addr fields of two instances of datum. Without this + * array, we would no longer be able to use a binary search on a sorted + * list to find the indexes of instances of datum given the values of + * their base_addr fields. * *****************************************************************************/ -int data_index[NUM_DATA_ENTRIES]; - +int *data_index = NULL; /***************************************************************************** * The following two #defines are used to control code that is in turn used - * to force "POSIX" semantics on the server process used to simulate metadata - * reads and writes. Without some such mechanism, the test code contains + * to force "POSIX" semantics on the server process used to simulate metadata + * reads and writes. Without some such mechanism, the test code contains * race conditions that will frequently cause spurious failures. * * When set to TRUE, DO_WRITE_REQ_ACK forces the server to send an ack after - * each write request, and the client to wait until the ack is received + * each write request, and the client to wait until the ack is received * before proceeding. This was my first solution to the problem, and at * first glance, it would seem to have a lot of unnecessary overhead. * * In an attempt to reduce the overhead, I implemented a second solution - * in which no acks are sent after writes. Instead, the metadata cache is - * provided with a callback function to call after each sequence of writes. - * This callback simply causes the client to send the server process a - * "sync" message and and await an ack in reply. + * in which no acks are sent after writes. 
Instead, the metadata cache is + * provided with a callback function to call after each sequence of writes. + * This callback simply causes the client to send the server process a + * "sync" message and await an ack in reply. * - * Strangely, at least on Phoenix, the first solution runs faster by a - * rather large margin. However, I can imagine this changing with - * different OS's and MPI implementatins. + * Strangely, at least on Phoenix, the first solution runs faster by a + * rather large margin. However, I can imagine this changing with + * different OS's and MPI implementations. * - * Thus I have left code supporting the second solution in place. + * Thus I have left code supporting the second solution in place. * - * Note that while one of these two #defines must be set to TRUE, there - * should never be any need to set both of them to TRUE (although the + * Note that while one of these two #defines must be set to TRUE, there + * should never be any need to set both of them to TRUE (although the * tests will still function with this setting). *****************************************************************************/ -#define DO_WRITE_REQ_ACK TRUE -#define DO_SYNC_AFTER_WRITE FALSE - +#define DO_WRITE_REQ_ACK TRUE +#define DO_SYNC_AFTER_WRITE FALSE /***************************************************************************** * struct mssg * - * The mssg structure is used as a generic container for messages to - * and from the server. Not all fields are used in all cases. + * The mssg structure is used as a generic container for messages to + * and from the server. Not all fields are used in all cases. * - * req: Integer field containing the type of the message. + * req: Integer field containing the type of the message. * - * src: World communicator MPI rank of the sending process. + * src: World communicator MPI rank of the sending process. * - * dest: World communicator MPI rank of the destination process. + * dest: World communicator MPI rank of the destination process. * - * mssg_num: Serial number assigned to the message by the sender. + * mssg_num: Serial number assigned to the message by the sender. * - * base_addr: Base address of a datum. Not used in all mssgs. + * base_addr: Base address of a datum. Not used in all mssgs. * - * len: Length of a datum (in bytes). Not used in all mssgs. + * len: Length of a datum (in bytes). Not used in all mssgs. * - * ver: Version number of a datum. Not used in all mssgs. + * ver: Version number of a datum. Not used in all mssgs. * - * count: Reported number of total/entry reads/writes. Not used - * in all mssgs. + * count: Reported number of total/entry reads/writes. Not used + * in all mssgs. * - * magic: Magic number for error detection. Must be set to - * MSSG_MAGIC. + * magic: Magic number for error detection. Must be set to + * MSSG_MAGIC. 
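Putting the fields above together, a client composes a request in the style used throughout this file (READ_REQ_CODE and MSSG_MAGIC are defined just below; idx is a hypothetical index):

struct mssg_t mssg;

mssg.req       = READ_REQ_CODE;         /* message type */
mssg.src       = world_mpi_rank;        /* sender's world rank */
mssg.dest      = world_server_mpi_rank; /* server's world rank */
mssg.mssg_num  = -1;                    /* overwritten by send_mssg() */
mssg.base_addr = data[idx].base_addr;   /* datum being requested */
mssg.len       = (unsigned)data[idx].len;
mssg.ver       = 0;                     /* supplied by the server's reply */
mssg.count     = 0;
mssg.magic     = MSSG_MAGIC;            /* required on every message */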
* *****************************************************************************/ -#define WRITE_REQ_CODE 0 -#define WRITE_REQ_ACK_CODE 1 -#define READ_REQ_CODE 2 -#define READ_REQ_REPLY_CODE 3 -#define SYNC_REQ_CODE 4 -#define SYNC_ACK_CODE 5 -#define REQ_TTL_WRITES_CODE 6 -#define REQ_TTL_WRITES_RPLY_CODE 7 -#define REQ_TTL_READS_CODE 8 -#define REQ_TTL_READS_RPLY_CODE 9 -#define REQ_ENTRY_WRITES_CODE 10 -#define REQ_ENTRY_WRITES_RPLY_CODE 11 -#define REQ_ENTRY_READS_CODE 12 -#define REQ_ENTRY_READS_RPLY_CODE 13 -#define REQ_RW_COUNT_RESET_CODE 14 -#define REQ_RW_COUNT_RESET_RPLY_CODE 15 -#define DONE_REQ_CODE 16 -#define MAX_REQ_CODE 16 - -#define MSSG_MAGIC 0x1248 - -struct mssg_t -{ - int req; - int src; - int dest; - long int mssg_num; - haddr_t base_addr; - unsigned len; - int ver; - int count; - unsigned magic; +#define WRITE_REQ_CODE 0 +#define WRITE_REQ_ACK_CODE 1 +#define READ_REQ_CODE 2 +#define READ_REQ_REPLY_CODE 3 +#define SYNC_REQ_CODE 4 +#define SYNC_ACK_CODE 5 +#define REQ_TTL_WRITES_CODE 6 +#define REQ_TTL_WRITES_RPLY_CODE 7 +#define REQ_TTL_READS_CODE 8 +#define REQ_TTL_READS_RPLY_CODE 9 +#define REQ_ENTRY_WRITES_CODE 10 +#define REQ_ENTRY_WRITES_RPLY_CODE 11 +#define REQ_ENTRY_READS_CODE 12 +#define REQ_ENTRY_READS_RPLY_CODE 13 +#define REQ_RW_COUNT_RESET_CODE 14 +#define REQ_RW_COUNT_RESET_RPLY_CODE 15 +#define DONE_REQ_CODE 16 +#define MAX_REQ_CODE 16 + +#define MSSG_MAGIC 0x1248 + +struct mssg_t { + int req; + int src; + int dest; + long int mssg_num; + haddr_t base_addr; + unsigned len; + int ver; + unsigned count; + unsigned magic; }; -MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */ - +MPI_Datatype mpi_mssg_t = MPI_DATATYPE_NULL; /* for MPI derived type created from mssg */ /*****************************************************************************/ /************************** function declarations ****************************/ @@ -343,19 +334,16 @@ static void reset_stats(void); static hbool_t set_up_file_communicator(void); - /* data array manipulation functions */ -static int addr_to_datum_index(haddr_t base_addr); +static int addr_to_datum_index(haddr_t base_addr); static void init_data(void); - /* test coodination related functions */ -static int do_express_test(void); +static int do_express_test(void); static void do_sync(void); -static int get_max_nerrors(void); - +static int get_max_nerrors(void); /* mssg xfer related functions */ @@ -364,89 +352,99 @@ static hbool_t send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag); static hbool_t setup_derived_types(void); static hbool_t takedown_derived_types(void); - /* server functions */ static hbool_t reset_server_counters(void); static hbool_t server_main(void); -static hbool_t serve_read_request(struct mssg_t * mssg_ptr); -static hbool_t serve_sync_request(struct mssg_t * mssg_ptr); -static hbool_t serve_write_request(struct mssg_t * mssg_ptr); -static hbool_t serve_total_writes_request(struct mssg_t * mssg_ptr); -static hbool_t serve_total_reads_request(struct mssg_t * mssg_ptr); -static hbool_t serve_entry_writes_request(struct mssg_t * mssg_ptr); -static hbool_t serve_entry_reads_request(struct mssg_t * mssg_ptr); -static hbool_t serve_rw_count_reset_request(struct mssg_t * mssg_ptr); - +static hbool_t serve_read_request(struct mssg_t *mssg_ptr); +static hbool_t serve_sync_request(struct mssg_t *mssg_ptr); +static hbool_t serve_write_request(struct mssg_t *mssg_ptr); +static hbool_t serve_total_writes_request(struct mssg_t *mssg_ptr); +static hbool_t 
serve_total_reads_request(struct mssg_t *mssg_ptr); +static hbool_t serve_entry_writes_request(struct mssg_t *mssg_ptr); +static hbool_t serve_entry_reads_request(struct mssg_t *mssg_ptr); +static hbool_t serve_rw_count_reset_request(struct mssg_t *mssg_ptr); /* call back functions & related data structures */ -static herr_t clear_datum(H5F_t * f, void * thing, hbool_t dest); -static herr_t destroy_datum(H5F_t UNUSED * f, void * thing); -static herr_t flush_datum(H5F_t *f, hid_t UNUSED dxpl_id, hbool_t dest, haddr_t addr, - void *thing); -static void * load_datum(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, - void UNUSED *udata); -static herr_t size_datum(H5F_t UNUSED * f, void * thing, size_t * size_ptr); +static herr_t datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr); -#define DATUM_ENTRY_TYPE H5AC_TEST_ID +static void *datum_deserialize(const void *image_ptr, size_t len, void *udata_ptr, hbool_t *dirty_ptr); -#define NUMBER_OF_ENTRY_TYPES 1 +static herr_t datum_image_len(const void *thing, size_t *image_len_ptr); -const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] = -{ - { - DATUM_ENTRY_TYPE, - (H5C_load_func_t)load_datum, - (H5C_flush_func_t)flush_datum, - (H5C_dest_func_t)destroy_datum, - (H5C_clear_func_t)clear_datum, - (H5C_notify_func_t)NULL, - (H5C_size_func_t)size_datum - } -}; +static herr_t datum_serialize(const H5F_t *f, void *image_ptr, size_t len, void *thing_ptr); + +static herr_t datum_notify(H5C_notify_action_t action, void *thing); +static herr_t datum_free_icr(void *thing); + +/* Masquerade as object header entries to the cache */ +#define DATUM_ENTRY_TYPE H5AC_OHDR_ID + +#define NUMBER_OF_ENTRY_TYPES 1 + +/* Note the use of the H5AC__CLASS_SKIP_READS and H5AC__CLASS_SKIP_WRITES + * flags. As a result of these flags, the metadata cache does no file I/O + * on metadata of the datum type. + * + * Instead, this test uses a server process to keep track of who has + * written and read what, and to verify that there are no messages from + * the past / future. + * + * In the callbacks for the version 2 cache, this activity was hidden in + * the load and flush callbacks. However, now we handle this function in + * notify callbacks for the after load and after flush events. 
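The shape of such a notify callback, reduced to the two events just mentioned (a simplified sketch only, not the test's actual datum_notify(), which also exchanges messages with the server):

static herr_t
example_notify(H5C_notify_action_t action, void *thing)
{
    struct datum *entry_ptr = (struct datum *)thing;

    switch (action) {
        case H5C_NOTIFY_ACTION_AFTER_LOAD:
            entry_ptr->reads++;  /* stand-in for "report a read to the server" */
            break;

        case H5C_NOTIFY_ACTION_AFTER_FLUSH:
            entry_ptr->writes++; /* stand-in for "report a write to the server" */
            break;

        default: /* all other actions are ignored in this sketch */
            break;
    }

    return SUCCEED;
}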
+ * + * JRM -- 1/13/15 + */ +const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] = {{ + /* id */ DATUM_ENTRY_TYPE, + /* name */ "datum", + /* mem_type */ H5FD_MEM_OHDR, + /* flags */ H5AC__CLASS_SKIP_READS | H5AC__CLASS_SKIP_WRITES, + /* get_initial_load_size */ datum_get_initial_load_size, + /* get_final_load_size */ NULL, + /* verify_chksum */ NULL, + /* deserialize */ datum_deserialize, + /* image_len */ datum_image_len, + /* pre_serialize */ NULL, + /* serialize */ datum_serialize, + /* notify */ datum_notify, + /* free_icr */ datum_free_icr, + /* fsf_size */ NULL, +}}; /* test utility functions */ -static void expunge_entry(H5F_t * file_ptr, int32_t idx); -static void insert_entry(H5C_t * cache_ptr, H5F_t * file_ptr, - int32_t idx, unsigned int flags); -static void local_pin_and_unpin_random_entries(H5F_t * file_ptr, int min_idx, - int max_idx, int min_count, - int max_count); -static void local_pin_random_entry(H5F_t * file_ptr, int min_idx, int max_idx); -static void local_unpin_all_entries(H5F_t * file_ptr, hbool_t via_unprotect); -static int local_unpin_next_pinned_entry(H5F_t * file_ptr, int start_idx, - hbool_t via_unprotect); -static void lock_and_unlock_random_entries(H5F_t * file_ptr, int min_idx, int max_idx, - int min_count, int max_count); -static void lock_and_unlock_random_entry(H5F_t * file_ptr, - int min_idx, int max_idx); -static void lock_entry(H5F_t * file_ptr, int32_t idx); -static void mark_entry_dirty(int32_t idx); -static void pin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global, hbool_t dirty); -#ifdef H5_METADATA_TRACE_FILE -static void pin_protected_entry(int32_t idx, hbool_t global); -#endif /* H5_METADATA_TRACE_FILE */ -static void move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx); +static void expunge_entry(H5F_t *file_ptr, int32_t idx); +static void insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags); +static void local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, + int max_count); +static void local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx); +static void local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect); +static int local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect); +static void lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, + int max_count); +static void lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx); +static void lock_entry(H5F_t *file_ptr, int32_t idx); +static void mark_entry_dirty(int32_t idx); +static void pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty); +static void pin_protected_entry(int32_t idx, hbool_t global); +static void move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx); static hbool_t reset_server_counts(void); -static void resize_entry(int32_t idx, size_t new_size); -static hbool_t setup_cache_for_test(hid_t * fid_ptr, - H5F_t ** file_ptr_ptr, - H5C_t ** cache_ptr_ptr, - int metadata_write_strategy); -static void setup_rand(void); -static hbool_t take_down_cache(hid_t fid); +static void resize_entry(int32_t idx, size_t new_size); +static hbool_t setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, + int metadata_write_strategy); +static void setup_rand(void); +static hbool_t take_down_cache(hid_t fid, H5C_t *cache_ptr); static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads); static hbool_t verify_entry_writes(haddr_t addr, int 
expected_entry_writes); static hbool_t verify_total_reads(int expected_total_reads); -static hbool_t verify_total_writes(int expected_total_writes); -static void verify_writes(int num_writes, haddr_t * written_entries_tbl); -static void unlock_entry(H5F_t * file_ptr, int32_t type, unsigned int flags); -static void unpin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global, - hbool_t dirty, hbool_t via_unprotect); - +static hbool_t verify_total_writes(unsigned expected_total_writes); +static void verify_writes(unsigned num_writes, haddr_t *written_entries_tbl); +static void unlock_entry(H5F_t *file_ptr, int32_t type, unsigned int flags); +static void unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect); /* test functions */ @@ -456,9 +454,9 @@ static hbool_t smoke_check_2(int metadata_write_strategy); static hbool_t smoke_check_3(int metadata_write_strategy); static hbool_t smoke_check_4(int metadata_write_strategy); static hbool_t smoke_check_5(int metadata_write_strategy); +static hbool_t smoke_check_6(int metadata_write_strategy); static hbool_t trace_file_check(int metadata_write_strategy); - /*****************************************************************************/ /****************************** stats functions ******************************/ /*****************************************************************************/ @@ -466,37 +464,32 @@ static hbool_t trace_file_check(int metadata_write_strategy); #ifdef NOT_USED /***************************************************************************** * - * Function: print_stats() + * Function: print_stats() * - * Purpose: Print the rudementary stats maintained by t_cache. + * Purpose: Print the rudimentary stats maintained by t_cache. * - * This is a debugging function, which will not normally - * be run as part of t_cache. + * This is a debugging function, which will not normally + * be run as part of t_cache. * - * Return: void + * Return: void * - * Programmer: JRM -- 4/17/06 + * Programmer: JRM -- 4/17/06 * * Modifications: * - * None. + * None. * *****************************************************************************/ static void print_stats(void) { - HDfprintf(stdout, - "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n", - world_mpi_rank, datum_clears, datum_pinned_clears, - datum_destroys ); - HDfprintf(stdout, - "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n", - world_mpi_rank, datum_flushes, datum_pinned_flushes, - datum_loads ); - HDfprintf(stdout, - "%d: pins: global / global dirty / local = %ld / %ld / %ld\n", - world_mpi_rank, global_pins, global_dirty_pins, local_pins); + HDfprintf(stdout, "%d: datum clears / pinned clears / destroys = %ld / %ld / %ld\n", world_mpi_rank, + datum_clears, datum_pinned_clears, datum_destroys); + HDfprintf(stdout, "%d: datum flushes / pinned flushes / loads = %ld / %ld / %ld\n", world_mpi_rank, + datum_flushes, datum_pinned_flushes, datum_loads); + HDfprintf(stdout, "%d: pins: global / global dirty / local = %ld / %ld / %ld\n", world_mpi_rank, + global_pins, global_dirty_pins, local_pins); HDfflush(stdout); return; @@ -506,190 +499,185 @@ print_stats(void) /***************************************************************************** * - * Function: reset_stats() + * Function: reset_stats() * - * Purpose: Reset the rudementary stats maintained by t_cache. + * Purpose: Reset the rudimentary stats maintained by t_cache.
* - * Return: void + * Return: void * - * Programmer: JRM -- 4/17/06 + * Programmer: JRM -- 4/17/06 * * Modifications: * - * None. + * None. * *****************************************************************************/ static void reset_stats(void) { - datum_clears = 0; - datum_pinned_clears = 0; - datum_destroys = 0; - datum_flushes = 0; - datum_pinned_flushes = 0; - datum_loads = 0; - global_pins = 0; - global_dirty_pins = 0; - local_pins = 0; + datum_clears = 0; + datum_pinned_clears = 0; + datum_destroys = 0; + datum_flushes = 0; + datum_pinned_flushes = 0; + datum_loads = 0; + global_pins = 0; + global_dirty_pins = 0; + local_pins = 0; return; } /* reset_stats() */ - /*****************************************************************************/ /**************************** MPI setup functions ****************************/ /*****************************************************************************/ /***************************************************************************** * - * Function: set_up_file_communicator() + * Function: set_up_file_communicator() * - * Purpose: Create the MPI communicator used to open a HDF5 file with. - * In passing, also initialize the file_mpi... globals. + * Purpose: Create the MPI communicator used to open an HDF5 file. + * In passing, also initialize the file_mpi... globals. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 11/16/05 + * Programmer: JRM -- 11/16/05 * * Modifications: * - * None. + * None. * *****************************************************************************/ static hbool_t set_up_file_communicator(void) { - const char * fcn_name = "set_up_file_communicator()"; - hbool_t success = TRUE; - int mpi_result; - int num_excluded_ranks; - int excluded_ranks[1]; - MPI_Group file_group; - MPI_Group world_group; + hbool_t success = TRUE; + int mpi_result; + int num_excluded_ranks; + int excluded_ranks[1]; + MPI_Group file_group = MPI_GROUP_NULL; + MPI_Group world_group = MPI_GROUP_NULL; - if ( success ) { + if (success) { mpi_result = MPI_Comm_group(world_mpi_comm, &world_group); - if ( mpi_result != MPI_SUCCESS ) { + if (mpi_result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: MPI_Comm_group() failed with error %d.\n", - world_mpi_rank, fcn_name, mpi_result); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Comm_group() failed with error %d.\n", world_mpi_rank, __func__, + mpi_result); } } } - if ( success ) { + if (success) { num_excluded_ranks = 1; - excluded_ranks[0] = world_server_mpi_rank; - mpi_result = MPI_Group_excl(world_group, num_excluded_ranks, - excluded_ranks, &file_group); + excluded_ranks[0] = world_server_mpi_rank; + mpi_result = MPI_Group_excl(world_group, num_excluded_ranks, excluded_ranks, &file_group); - if ( mpi_result != MPI_SUCCESS ) { + if (mpi_result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: MPI_Group_excl() failed with error %d.\n", - world_mpi_rank, fcn_name, mpi_result); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Group_excl() failed with error %d.\n", world_mpi_rank, __func__, + mpi_result); } } } - if ( success ) { + if (success) { - mpi_result = MPI_Comm_create(world_mpi_comm, file_group, - &file_mpi_comm); + mpi_result = MPI_Comm_create(world_mpi_comm, file_group, &file_mpi_comm); - if ( mpi_result != MPI_SUCCESS ) { + if (mpi_result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, -
"%d:%s: MPI_Comm_create() failed with error %d.\n", - world_mpi_rank, fcn_name, mpi_result); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Comm_create() failed with error %d.\n", world_mpi_rank, + __func__, mpi_result); } + } + else { - } else { - - if ( world_mpi_rank != world_server_mpi_rank ) { + if (world_mpi_rank != world_server_mpi_rank) { - if ( file_mpi_comm == MPI_COMM_NULL ) { + if (file_mpi_comm == MPI_COMM_NULL) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: file_mpi_comm == MPI_COMM_NULL.\n", world_mpi_rank, + __func__); } } - } else { + } + else { file_mpi_size = world_mpi_size - 1; /* needed by the server */ - if ( file_mpi_comm != MPI_COMM_NULL ) { + if (file_mpi_comm != MPI_COMM_NULL) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: file_mpi_comm != MPI_COMM_NULL.\n", world_mpi_rank, + __func__); } } } } } - if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) { + if ((success) && (world_mpi_rank != world_server_mpi_rank)) { mpi_result = MPI_Comm_size(file_mpi_comm, &file_mpi_size); - if ( mpi_result != MPI_SUCCESS ) { + if (mpi_result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: MPI_Comm_size() failed with error %d.\n", - world_mpi_rank, fcn_name, mpi_result); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Comm_size() failed with error %d.\n", world_mpi_rank, __func__, + mpi_result); } } } - if ( ( success ) && ( world_mpi_rank != world_server_mpi_rank ) ) { + if ((success) && (world_mpi_rank != world_server_mpi_rank)) { mpi_result = MPI_Comm_rank(file_mpi_comm, &file_mpi_rank); - if ( mpi_result != MPI_SUCCESS ) { + if (mpi_result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - fprintf(stdout, - "%d:%s: MPI_Comm_rank() failed with error %d.\n", - world_mpi_rank, fcn_name, mpi_result); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Comm_rank() failed with error %d.\n", world_mpi_rank, __func__, + mpi_result); } } } - return(success); + if (file_group != MPI_GROUP_NULL) + MPI_Group_free(&file_group); -} /* set_up_file_communicator() */ + if (world_group != MPI_GROUP_NULL) + MPI_Group_free(&world_group); + + return (success); +} /* set_up_file_communicator() */ /*****************************************************************************/ /******************** data array manipulation functions **********************/ @@ -697,70 +685,65 @@ set_up_file_communicator(void) /***************************************************************************** * - * Function: addr_to_datum_index() + * Function: addr_to_datum_index() * - * Purpose: Given the base address of a datum, find and return its index - * in the data array. + * Purpose: Given the base address of a datum, find and return its index + * in the data array. * - * Return: Success: index of target datum. + * Return: Success: index of target datum. * - * Failure: -1. + * Failure: -1. 
* - * Programmer: JRM -- 12/20/05 + * Programmer: JRM -- 12/20/05 * *****************************************************************************/ static int addr_to_datum_index(haddr_t base_addr) { - /* const char * fcn_name = "addr_to_datum_index()"; */ - int top = NUM_DATA_ENTRIES - 1; - int bottom = 0; - int middle = (NUM_DATA_ENTRIES - 1) / 2; + int top = NUM_DATA_ENTRIES - 1; + int bottom = 0; + int middle = (NUM_DATA_ENTRIES - 1) / 2; int ret_value = -1; - while ( top >= bottom ) - { - if ( base_addr < data[data_index[middle]].base_addr ) { + while (top >= bottom) { + if (base_addr < data[data_index[middle]].base_addr) { - top = middle - 1; + top = middle - 1; middle = (top + bottom) / 2; - - } else if ( base_addr > data[data_index[middle]].base_addr ) { + } + else if (base_addr > data[data_index[middle]].base_addr) { bottom = middle + 1; middle = (top + bottom) / 2; - - } else /* ( base_addr == data[data_index[middle]].base_addr ) */ { + } + else /* ( base_addr == data[data_index[middle]].base_addr ) */ { ret_value = data_index[middle]; - bottom = top + 1; /* to force exit from while loop */ - + bottom = top + 1; /* to force exit from while loop */ } } - return(ret_value); + return (ret_value); } /* addr_to_datum_index() */ - /***************************************************************************** * - * Function: init_data() + * Function: init_data() * - * Purpose: Initialize the data array, from which cache entries are - * loaded. + * Purpose: Initialize the data array, from which cache entries are + * loaded. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/20/05 + * Programmer: JRM -- 12/20/05 * *****************************************************************************/ static void init_data(void) { - /* const char * fcn_name = "init_data()"; */ /* The set of address offsets is chosen so as to avoid allowing the * base addresses to fall in a pattern of that will annoy the hash * table, and to give a good range of entry sizes. @@ -768,21 +751,18 @@ init_data(void) * At present, I am using the first 20 entries of the Fibonacci * sequence multiplied by 2. We will see how it works. */ - const int num_addr_offsets = 20; - const haddr_t addr_offsets[20] = { 2, 2, 4, 6, 10, - 16, 26, 42, 68, 110, - 178, 288, 466, 754, 1220, - 1974, 3194, 5168, 8362, 13539}; - int i; - int j = 0; - haddr_t addr = 512; + const int num_addr_offsets = 20; + const haddr_t addr_offsets[20] = {2, 2, 4, 6, 10, 16, 26, 42, 68, 110, + 178, 288, 466, 754, 1220, 1974, 3194, 5168, 8362, 13539}; + int i; + int j = 0; + haddr_t addr = BASE_ADDR; /* this must hold so moves don't change entry size. 
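A quick sanity check for addr_to_datum_index() above, valid immediately after init_data() below has run and before any move operation permutes data_index[]:

int idx = addr_to_datum_index((haddr_t)BASE_ADDR); /* first entry starts at BASE_ADDR */

HDassert(idx == 0);
HDassert(data[idx].base_addr == (haddr_t)BASE_ADDR);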
*/ - HDassert( (NUM_DATA_ENTRIES / 2) % 20 == 0 ); - HDassert( (virt_num_data_entries / 2) % 20 == 0 ); + HDassert((NUM_DATA_ENTRIES / 2) % 20 == 0); + HDassert((virt_num_data_entries / 2) % 20 == 0); - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { + for (i = 0; i < NUM_DATA_ENTRIES; i++) { data[i].base_addr = addr; data[i].len = (size_t)(addr_offsets[j]); data[i].local_len = (size_t)(addr_offsets[j]); @@ -790,116 +770,106 @@ init_data(void) data[i].dirty = FALSE; data[i].valid = FALSE; data[i].locked = FALSE; - data[i].global_pinned = FALSE; - data[i].local_pinned = FALSE; - data[i].cleared = FALSE; - data[i].flushed = FALSE; + data[i].global_pinned = FALSE; + data[i].local_pinned = FALSE; + data[i].cleared = FALSE; + data[i].flushed = FALSE; data[i].reads = 0; data[i].writes = 0; - data[i].index = i; + data[i].index = i; + data[i].aux_ptr = NULL; - data_index[i] = i; + data_index[i] = i; addr += addr_offsets[j]; - HDassert( addr > data[i].base_addr ); + HDassert(addr > data[i].base_addr); j = (j + 1) % num_addr_offsets; } + /* save the end of the address space used by the data array */ + max_addr = addr; + return; } /* init_data() */ - /*****************************************************************************/ /******************** test coordination related functions *********************/ /*****************************************************************************/ /***************************************************************************** * - * Function: do_express_test() + * Function: do_express_test() * - * Purpose: Do an MPI_Allreduce to obtain the maximum value returned - * by GetTestExpress() across all processes. Return this - * value. + * Purpose: Do an MPI_Allreduce to obtain the maximum value returned + * by GetTestExpress() across all processes. Return this + * value. * - * Envirmoment variables can be different across different - * processes. This function ensures that all processes agree - * on whether to do an express test. + * Environment variables can be different across different + * processes. This function ensures that all processes agree + * on whether to do an express test. * - * Return: Success: Maximum of the values returned by - * GetTestExpress() across all processes. + * Return: Success: Maximum of the values returned by + * GetTestExpress() across all processes. * - * Failure: -1 + * Failure: -1 * - * Programmer: JRM -- 4/25/06 + * Programmer: JRM -- 4/25/06 * *****************************************************************************/ static int do_express_test(void) { - const char * fcn_name = "do_express_test()"; int express_test; int max_express_test; int result; express_test = GetTestExpress(); - result = MPI_Allreduce((void *)&express_test, - (void *)&max_express_test, - 1, - MPI_INT, - MPI_MAX, - world_mpi_comm); + result = + MPI_Allreduce((void *)&express_test, (void *)&max_express_test, 1, MPI_INT, MPI_MAX, world_mpi_comm); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; max_express_test = -1; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", - world_mpi_rank, fcn_name ); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, __func__); } } - return(max_express_test); + return (max_express_test); } /* do_express_test() */ - /***************************************************************************** * - * Function: do_sync() + * Function: do_sync() * - * Purpose: Ensure that all messages sent by this process have been - * processed before proceeding.
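do_express_test() above, and get_max_nerrors() further down, share a single consensus idiom: reduce a per-rank value with MPI_MAX so that every process takes the same branch afterwards. The core of it:

int local_value = GetTestExpress(); /* may differ from rank to rank */
int global_max  = -1;

MPI_Allreduce(&local_value, &global_max, 1, MPI_INT, MPI_MAX, world_mpi_comm);
/* all ranks now hold the same global_max and will agree on what to do */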
+ * Purpose: Ensure that all messages sent by this process have been + * processed before proceeding. * - * Do this by exchanging sync req / sync ack messages with - * the server. + * Do this by exchanging sync req / sync ack messages with + * the server. * - * Do nothing if nerrors is greater than zero. + * Do nothing if nerrors is greater than zero. * - * Return: void + * Return: void * - * Programmer: JRM -- 5/10/06 - * - * Modifications: - * - * None. + * Programmer: JRM -- 5/10/06 * *****************************************************************************/ - static void do_sync(void) { - const char * fcn_name = "do_sync()"; struct mssg_t mssg; - if ( nerrors <= 0 ) { + if (nerrors <= 0) { /* compose the message */ - mssg.req = SYNC_REQ_CODE; + mssg.req = SYNC_REQ_CODE; mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ @@ -909,697 +879,628 @@ do_sync(void) mssg.count = 0; mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { - nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( nerrors <= 0 ) { + if (nerrors <= 0) { - if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) { + if (!recv_mssg(&mssg, SYNC_ACK_CODE)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); } - } else if ( ( mssg.req != SYNC_ACK_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.magic != MSSG_MAGIC ) ) { + } + else if ((mssg.req != SYNC_ACK_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.magic != MSSG_MAGIC)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n", world_mpi_rank, __func__); } - } + } } return; } /* do_sync() */ - /***************************************************************************** * - * Function: get_max_nerrors() + * Function: get_max_nerrors() * - * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors - * across all processes. Return this value. + * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors + * across all processes. Return this value. * - * Return: Success: Maximum of the nerrors global variables across - * all processes. + * Return: Success: Maximum of the nerrors global variables across + * all processes. * - * Failure: -1 + * Failure: -1 * - * Programmer: JRM -- 1/3/06 - * - * Modifications: - * - * None. 
+ * Programmer: JRM -- 1/3/06 * *****************************************************************************/ - static int get_max_nerrors(void) { - const char * fcn_name = "get_max_nerrors()"; int max_nerrors; int result; - result = MPI_Allreduce((void *)&nerrors, - (void *)&max_nerrors, - 1, - MPI_INT, - MPI_MAX, - world_mpi_comm); + result = MPI_Allreduce((void *)&nerrors, (void *)&max_nerrors, 1, MPI_INT, MPI_MAX, world_mpi_comm); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; max_nerrors = -1; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", - world_mpi_rank, fcn_name ); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n", world_mpi_rank, __func__); } } - return(max_nerrors); + return (max_nerrors); } /* get_max_nerrors() */ - /*****************************************************************************/ /************************ mssg xfer related functions ************************/ /*****************************************************************************/ /***************************************************************************** * - * Function: recv_mssg() + * Function: recv_mssg() * - * Purpose: Receive a message from any process in the provided instance - * of struct mssg. + * Purpose: Receive a message from any process in the provided instance + * of struct mssg. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 + * Programmer: JRM -- 12/22/05 * * Modifications: * - * JRM -- 5/10/06 - * Added mssg_tag_offset parameter and supporting code. + * JRM -- 5/10/06 + * Added mssg_tag_offset parameter and supporting code. * *****************************************************************************/ -#define CACHE_TEST_TAG 99 /* different from any used by the library */ +#define CACHE_TEST_TAG 99 /* different from any used by the library */ static hbool_t -recv_mssg(struct mssg_t *mssg_ptr, - int mssg_tag_offset) +recv_mssg(struct mssg_t *mssg_ptr, int mssg_tag_offset) { - const char * fcn_name = "recv_mssg()"; - hbool_t success = TRUE; - int mssg_tag = CACHE_TEST_TAG; - int result; + hbool_t success = TRUE; + int mssg_tag = CACHE_TEST_TAG; + int result; MPI_Status status; - if ( ( mssg_ptr == NULL ) || - ( mssg_tag_offset < 0 ) || - ( mssg_tag_offset> MAX_REQ_CODE ) ) { + if ((mssg_ptr == NULL) || (mssg_tag_offset < 0) || (mssg_tag_offset > MAX_REQ_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: bad param(s) on entry.\n", world_mpi_rank, __func__); } - } else { + } + else { mssg_tag += mssg_tag_offset; } - if ( success ) { + if (success) { - result = MPI_Recv((void *)mssg_ptr, 1, mpi_mssg_t, MPI_ANY_SOURCE, - mssg_tag, world_mpi_comm, &status); + result = MPI_Recv((void *)mssg_ptr, 1, mpi_mssg_t, MPI_ANY_SOURCE, mssg_tag, world_mpi_comm, &status); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n", - world_mpi_rank, fcn_name ); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Recv() failed.\n", world_mpi_rank, __func__); } - } else if ( mssg_ptr->magic != MSSG_MAGIC ) { + } + else if (mssg_ptr->magic != MSSG_MAGIC) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: invalid magic.\n", world_mpi_rank, - fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: 
invalid magic.\n", world_mpi_rank, __func__); } - } else if ( mssg_ptr->src != status.MPI_SOURCE ) { + } + else if (mssg_ptr->src != status.MPI_SOURCE) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: mssg_ptr->src != status.MPI_SOURCE.\n", world_mpi_rank, __func__); } } } - return(success); + return (success); } /* recv_mssg() */ - /***************************************************************************** * - * Function: send_mssg() + * Function: send_mssg() * - * Purpose: Send the provided instance of mssg to the indicated target. + * Purpose: Send the provided instance of mssg to the indicated target. * - * Note that all source and destination ranks are in the - * global communicator. + * Note that all source and destination ranks are in the + * global communicator. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 + * Programmer: JRM -- 12/22/05 * * Modifications: * - * JRM -- 5/10/06 - * Added the add_req_to_tag parameter and supporting code. + * JRM -- 5/10/06 + * Added the add_req_to_tag parameter and supporting code. * *****************************************************************************/ - static hbool_t -send_mssg(struct mssg_t *mssg_ptr, - hbool_t add_req_to_tag) +send_mssg(struct mssg_t *mssg_ptr, hbool_t add_req_to_tag) { - const char * fcn_name = "send_mssg()"; - hbool_t success = TRUE; - int mssg_tag = CACHE_TEST_TAG; - int result; + hbool_t success = TRUE; + int mssg_tag = CACHE_TEST_TAG; + int result; static long mssg_num = 0; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->src != world_mpi_rank ) || - ( mssg_ptr->dest < 0 ) || - ( mssg_ptr->dest == mssg_ptr->src ) || - ( mssg_ptr->dest >= world_mpi_size ) || - ( mssg_ptr->req < 0 ) || - ( mssg_ptr->req > MAX_REQ_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->src != world_mpi_rank) || (mssg_ptr->dest < 0) || + (mssg_ptr->dest == mssg_ptr->src) || (mssg_ptr->dest >= world_mpi_size) || (mssg_ptr->req < 0) || + (mssg_ptr->req > MAX_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Invalid mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { mssg_ptr->mssg_num = mssg_num++; - if ( add_req_to_tag ) { + if (add_req_to_tag) { - mssg_tag += mssg_ptr->req; - } + mssg_tag += mssg_ptr->req; + } - result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t, - mssg_ptr->dest, mssg_tag, world_mpi_comm); + result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t, mssg_ptr->dest, mssg_tag, world_mpi_comm); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Send() failed.\n", world_mpi_rank, __func__); } } } - return(success); + return (success); } /* send_mssg() */ - /***************************************************************************** * - * Function: setup_derived_types() + * Function: setup_derived_types() * - * Purpose: Set up the derived types used by the test bed. At present, - * only the mpi_mssg derived type is needed. + * Purpose: Set up the derived types used by the test bed. 
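send_mssg() and recv_mssg() above implement a small tag convention: when requested, the message's request code is added to CACHE_TEST_TAG, so a receiver can block on exactly one message type while unrelated traffic stays queued. For example, waiting only for a sync ack:

struct mssg_t mssg;
MPI_Status    status;

/* matches only messages sent with tag CACHE_TEST_TAG + SYNC_ACK_CODE */
MPI_Recv(&mssg, 1, mpi_mssg_t, MPI_ANY_SOURCE,
         CACHE_TEST_TAG + SYNC_ACK_CODE, world_mpi_comm, &status);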
At present, + * only the mpi_mssg derived type is needed. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 + * Programmer: JRM -- 12/22/05 * *****************************************************************************/ static hbool_t setup_derived_types(void) { - const char * fcn_name = "setup_derived_types()"; - hbool_t success = TRUE; - int i; - int result; - MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, - HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT, - MPI_INT, MPI_UNSIGNED}; - int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1}; - MPI_Aint displs[9]; + hbool_t success = TRUE; + int i; + int result; + MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG, HADDR_AS_MPI_TYPE, + MPI_INT, MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED}; + int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1}; + MPI_Aint displs[9]; struct mssg_t sample; /* used to compute displacements */ + HDmemset(&sample, 0, sizeof(struct mssg_t)); + /* setup the displacements array */ - if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) || - ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) { + if ((MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0])) || + (MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1])) || + (MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2])) || + (MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3])) || + (MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4])) || + (MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5])) || + (MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6])) || + (MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7])) || + (MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]))) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n", world_mpi_rank, __func__); } - - } else { + } + else { /* Now calculate the actual displacements */ - for ( i = 8; i >= 0; --i) - { + for (i = 8; i >= 0; --i) { displs[i] -= displs[0]; } } - if ( success ) { + if (success) { - result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t); + result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { result = MPI_Type_commit(&mpi_mssg_t); - if ( result != MPI_SUCCESS) { + if (result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Type_commit() call failed.\n", world_mpi_rank, __func__); } } } - return(success); + return (success); } 
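Two calls in setup_derived_types() above are the MPI-3 replacements for functions removed from the standard: MPI_Get_address() supersedes MPI_Address(), and MPI_Type_create_struct() supersedes MPI_Type_struct(). The same recipe applied to a toy struct:

struct pair {
    int    a;
    double b;
};

struct pair  sample;
MPI_Datatype pair_type;
MPI_Datatype types[2]  = {MPI_INT, MPI_DOUBLE};
int          blocks[2] = {1, 1};
MPI_Aint     displs[2];

MPI_Get_address(&sample.a, &displs[0]);
MPI_Get_address(&sample.b, &displs[1]);
displs[1] -= displs[0]; /* displacements relative to the struct start */
displs[0] = 0;

MPI_Type_create_struct(2, blocks, displs, types, &pair_type);
MPI_Type_commit(&pair_type);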
/* setup_derived_types */ - /***************************************************************************** * - * Function: takedown_derived_types() + * Function: takedown_derived_types() * - * Purpose: take down the derived types used by the test bed. At present, - * only the mpi_mssg derived type is needed. + * Purpose: take down the derived types used by the test bed. At present, + * only the mpi_mssg derived type is needed. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 - * - * Modifications: - * - * None. + * Programmer: JRM -- 12/22/05 * *****************************************************************************/ - static hbool_t takedown_derived_types(void) { - const char * fcn_name = "takedown_derived_types()"; hbool_t success = TRUE; - int result; + int result; + + if (mpi_mssg_t == MPI_DATATYPE_NULL) + return (success); result = MPI_Type_free(&mpi_mssg_t); - if ( result != MPI_SUCCESS ) { + if (result != MPI_SUCCESS) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: MPI_Type_free() call failed.\n", world_mpi_rank, __func__); } } - return(success); + return (success); } /* takedown_derived_types() */ - /*****************************************************************************/ /***************************** server functions ******************************/ /*****************************************************************************/ /***************************************************************************** * - * Function: reset_server_counters() + * Function: reset_server_counters() * - * Purpose: Reset the counters maintained by the server, doing a - * sanity check in passing. + * Purpose: Reset the counters maintained by the server, doing a + * sanity check in passing. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 - * - * Modifications: - * - * None. 
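The MPI_DATATYPE_NULL guard added to takedown_derived_types() above makes teardown safe to reach even when setup failed: MPI_Type_free() must not be called on a handle that was never created, and on success it resets the handle to MPI_DATATYPE_NULL, so the guarded call is also idempotent:

if (mpi_mssg_t != MPI_DATATYPE_NULL)
    MPI_Type_free(&mpi_mssg_t); /* resets mpi_mssg_t to MPI_DATATYPE_NULL */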
+ * Programmer: JRM -- 5/5/10 * *****************************************************************************/ - static hbool_t reset_server_counters(void) { - const char * fcn_name = "reset_server_counters()"; hbool_t success = TRUE; - int i; - long actual_total_reads = 0; - long actual_total_writes = 0; + int i; + long actual_total_reads = 0; + long actual_total_writes = 0; - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - if ( data[i].reads > 0 ) { + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + if (data[i].reads > 0) { actual_total_reads += data[i].reads; data[i].reads = 0; } - if ( data[i].writes > 0 ) { + if (data[i].writes > 0) { actual_total_writes += data[i].writes; data[i].writes = 0; } } - if ( actual_total_reads != total_reads ) { + if (actual_total_reads != total_reads) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, + if (verbose) { + HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%d).\n", world_mpi_rank, __func__, actual_total_reads, total_reads); } } - if ( actual_total_writes != total_writes ) { + if (actual_total_writes != total_writes) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, + if (verbose) { + HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%d).\n", world_mpi_rank, __func__, actual_total_writes, total_writes); } } - total_reads = 0; + total_reads = 0; total_writes = 0; - return(success); + return (success); } /* reset_server_counters() */ - /***************************************************************************** * - * Function: server_main() + * Function: server_main() * - * Purpose: Main function for the server process. This process exists - * to provide an independant view of the data array. + * Purpose: Main function for the server process. This process exists + * to provide an independent view of the data array. * - * The function handles request from the other processes in - * the test until the count of done messages received equals - * the number of client processes. + * The function handles request from the other processes in + * the test until the count of done messages received equals + * the number of client processes. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 + * Programmer: JRM -- 12/22/05 * * Modifications: * - * JRM -- 5/10/06 - * Updated for sync message. + * JRM -- 5/10/06 + * Updated for sync message. * *****************************************************************************/ - static hbool_t server_main(void) { - const char * fcn_name = "server_main()"; - hbool_t done = FALSE; - hbool_t success = TRUE; - int done_count = 0; + hbool_t done = FALSE; + hbool_t success = TRUE; + int done_count = 0; struct mssg_t mssg; - if ( world_mpi_rank != world_server_mpi_rank ) { + if (world_mpi_rank != world_server_mpi_rank) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: This isn't the server process?!?!?\n", world_mpi_rank, __func__); } } - - while ( ( success ) && ( ! 
done ) ) - { + while ((success) && (!done)) { success = recv_mssg(&mssg, 0); - if ( success ) { + if (success) { - switch ( mssg.req ) - { - case WRITE_REQ_CODE: - success = serve_write_request(&mssg); - break; + switch (mssg.req) { + case WRITE_REQ_CODE: + success = serve_write_request(&mssg); + break; - case WRITE_REQ_ACK_CODE: + case WRITE_REQ_ACK_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received write ack?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received write ack?!?.\n", __func__); + break; - case READ_REQ_CODE: + case READ_REQ_CODE: success = serve_read_request(&mssg); - break; + break; - case READ_REQ_REPLY_CODE: + case READ_REQ_REPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received read req reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received read req reply?!?.\n", __func__); + break; - case SYNC_REQ_CODE: + case SYNC_REQ_CODE: success = serve_sync_request(&mssg); - break; + break; - case SYNC_ACK_CODE: + case SYNC_ACK_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received sync ack?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received sync ack?!?.\n", __func__); + break; - case REQ_TTL_WRITES_CODE: - success = serve_total_writes_request(&mssg); - break; + case REQ_TTL_WRITES_CODE: + success = serve_total_writes_request(&mssg); + break; - case REQ_TTL_WRITES_RPLY_CODE: + case REQ_TTL_WRITES_RPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received total writes reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received total writes reply?!?.\n", __func__); + break; - case REQ_TTL_READS_CODE: - success = serve_total_reads_request(&mssg); - break; + case REQ_TTL_READS_CODE: + success = serve_total_reads_request(&mssg); + break; - case REQ_TTL_READS_RPLY_CODE: + case REQ_TTL_READS_RPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received total reads reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received total reads reply?!?.\n", __func__); + break; - case REQ_ENTRY_WRITES_CODE: - success = serve_entry_writes_request(&mssg); - break; + case REQ_ENTRY_WRITES_CODE: + success = serve_entry_writes_request(&mssg); + break; - case REQ_ENTRY_WRITES_RPLY_CODE: + case REQ_ENTRY_WRITES_RPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", __func__); + break; - case REQ_ENTRY_READS_CODE: - success = serve_entry_reads_request(&mssg); - break; + case REQ_ENTRY_READS_CODE: + success = serve_entry_reads_request(&mssg); + break; - case REQ_ENTRY_READS_RPLY_CODE: + case REQ_ENTRY_READS_RPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", __func__); + break; - case REQ_RW_COUNT_RESET_CODE: - success = serve_rw_count_reset_request(&mssg); - break; + case REQ_RW_COUNT_RESET_CODE: + success = serve_rw_count_reset_request(&mssg); + break; - case REQ_RW_COUNT_RESET_RPLY_CODE: + case REQ_RW_COUNT_RESET_RPLY_CODE: success = FALSE; - if(verbose) - HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", __func__); + break; - case DONE_REQ_CODE: - done_count++; - if(done_count >= file_mpi_size) - done = 
TRUE; - break; + case DONE_REQ_CODE: + done_count++; + if (done_count >= file_mpi_size) + done = TRUE; + break; - default: + default: nerrors++; success = FALSE; - if(verbose) - HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, fcn_name); - break; + if (verbose) + HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, __func__); + break; } } } - return(success); + return (success); } /* server_main() */ - /***************************************************************************** * - * Function: serve_read_request() + * Function: serve_read_request() * - * Purpose: Serve a read request. + * Purpose: Serve a read request. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends - * a copy of the indicated datum from the data array to - * the requesting process. + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it sends + * a copy of the indicated datum from the data array to + * the requesting process. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/22/05 + * Programmer: JRM -- 12/22/05 * *****************************************************************************/ static hbool_t -serve_read_request(struct mssg_t * mssg_ptr) +serve_read_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_read_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; - int target_index; - haddr_t target_addr; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; + int target_index; + haddr_t target_addr; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != READ_REQ_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != READ_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { - target_addr = mssg_ptr->base_addr; + target_addr = mssg_ptr->base_addr; target_index = addr_to_datum_index(target_addr); - if ( target_index < 0 ) { + if (target_index < 0) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + if (verbose) { + HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__, + target_addr); } - } else if ( data[target_index].len != mssg_ptr->len ) { + } + else if (data[target_index].len != mssg_ptr->len) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", - world_mpi_rank, fcn_name, - data[target_index].len, mssg_ptr->len); + if (verbose) { + HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__, + data[target_index].len, mssg_ptr->len); } - } else if ( ! (data[target_index].valid) ) { + } + else if (!(data[target_index].valid)) { nerrors++; success = FALSE; - if ( verbose ) { + if (verbose) { HDfprintf(stdout, - "%d:%s: proc %d read invalid entry. idx/base_addr = %d/0x%llx.\n", - world_mpi_rank, fcn_name, - mssg_ptr->src, - target_index, - (long long)(data[target_index].base_addr)); + "%d:%s: proc %d read invalid entry. 
" + "idx/base_addr = %d/%" PRIuHADDR ".\n", + world_mpi_rank, __func__, mssg_ptr->src, target_index, + data[target_index].base_addr); } - } else { + } + else { /* compose the reply message */ reply.req = READ_REQ_REPLY_CODE; @@ -1607,95 +1508,80 @@ serve_read_request(struct mssg_t * mssg_ptr) reply.dest = mssg_ptr->src; reply.mssg_num = -1; /* set by send function */ reply.base_addr = data[target_index].base_addr; - reply.len = data[target_index].len; - reply.ver = data[target_index].ver; - reply.count = 0; - reply.magic = MSSG_MAGIC; + H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t); + reply.ver = data[target_index].ver; + reply.count = 0; + reply.magic = MSSG_MAGIC; - /* and update the counters */ - total_reads++; + /* and update the counters */ + total_reads++; (data[target_index].reads)++; } } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { - HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (int)(data[target_index].len), + HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (int)(data[target_index].len), (int)(data[target_index].ver)); + } + else { - } else { - - HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (int)(data[target_index].len), + HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (int)(data[target_index].len), (int)(data[target_index].ver)); - } - } + } - return(success); + return (success); } /* serve_read_request() */ - /***************************************************************************** * - * Function: serve_sync_request() - * - * Purpose: Serve a sync request. - * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends a - * sync ack to the requesting process. + * Function: serve_sync_request() * - * This service exist to allow the sending process to ensure - * that all previous messages have been processed before - * proceeding. + * Purpose: Serve a sync request. * - * Return: Success: TRUE + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it sends a + * sync ack to the requesting process. * - * Failure: FALSE + * This service exist to allow the sending process to ensure + * that all previous messages have been processed before + * proceeding. * - * Programmer: JRM -- 5/10/06 + * Return: Success: TRUE * - * Modifications: + * Failure: FALSE * - * None. 
+ * Programmer: JRM -- 5/10/06 * *****************************************************************************/ - static hbool_t -serve_sync_request(struct mssg_t * mssg_ptr) +serve_sync_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_sync_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != SYNC_REQ_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != SYNC_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { /* compose the reply message */ reply.req = SYNC_ACK_CODE; @@ -1705,127 +1591,119 @@ serve_sync_request(struct mssg_t * mssg_ptr) reply.base_addr = 0; reply.len = 0; reply.ver = 0; - reply.count = 0; + reply.count = 0; reply.magic = MSSG_MAGIC; } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { HDfprintf(stdout, "%d sync.\n", (int)(mssg_ptr->src)); - - } else { + } + else { HDfprintf(stdout, "%d sync FAILED.\n", (int)(mssg_ptr->src)); - } - } + } - return(success); + return (success); } /* serve_sync_request() */ - /***************************************************************************** * - * Function: serve_write_request() + * Function: serve_write_request() * - * Purpose: Serve a write request. + * Purpose: Serve a write request. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it updates - * the version number of the target data array entry as - * specified in the message. + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it updates + * the version number of the target data array entry as + * specified in the message. 
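The handlers in this region all traffic in struct mssg_t. Its definition sits earlier in t_cache.c, outside this diff; reconstructed from the call sites, it looks roughly like the following (every field type here is inferred from usage, hence an assumption):

    /* Hypothetical reconstruction of the message record used by the
     * server protocol.  A stand-in is used for haddr_t so the sketch is
     * self-contained; the real type comes from H5public.h.
     */
    typedef unsigned long long haddr_t_standin;

    struct mssg_t_sketch {
        int             req;       /* request code, e.g. WRITE_REQ_CODE   */
        int             src;       /* world rank of the sender            */
        int             dest;      /* world rank of the receiver          */
        long int        mssg_num;  /* sequence number, set by send_mssg() */
        haddr_t_standin base_addr; /* base address of the target datum    */
        unsigned        len;       /* length of the target datum          */
        int             ver;       /* version number of the target datum  */
        unsigned        count;     /* payload for the counter replies     */
        unsigned        magic;     /* sanity check, always MSSG_MAGIC     */
    };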
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/21/05 + * Programmer: JRM -- 12/21/05 * *****************************************************************************/ static hbool_t -serve_write_request(struct mssg_t * mssg_ptr) +serve_write_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_write_request()"; hbool_t report_mssg = FALSE; - hbool_t success = TRUE; - int target_index; - int new_ver_num; + hbool_t success = TRUE; + int target_index; + int new_ver_num = 0; haddr_t target_addr; #if DO_WRITE_REQ_ACK struct mssg_t reply; #endif /* DO_WRITE_REQ_ACK */ - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != WRITE_REQ_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != WRITE_REQ_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { - target_addr = mssg_ptr->base_addr; + target_addr = mssg_ptr->base_addr; target_index = addr_to_datum_index(target_addr); - if ( target_index < 0 ) { + if (target_index < 0) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + if (verbose) { + HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__, + target_addr); } - } else if ( data[target_index].len != mssg_ptr->len ) { + } + else if (data[target_index].len != mssg_ptr->len) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: data[i].len = %Zu != mssg->len = %d.\n", - world_mpi_rank, fcn_name, + if (verbose) { + HDfprintf(stdout, "%d:%s: data[i].len = %zu != mssg->len = %d.\n", world_mpi_rank, __func__, data[target_index].len, mssg_ptr->len); } } } - if ( success ) { + if (success) { new_ver_num = mssg_ptr->ver; /* this check should catch duplicate writes */ - if ( new_ver_num <= data[target_index].ver ) { + if (new_ver_num <= data[target_index].ver) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", - world_mpi_rank, fcn_name, + if (verbose) { + HDfprintf(stdout, "%d:%s: new ver = %d <= old ver = %d.\n", world_mpi_rank, __func__, new_ver_num, data[target_index].ver); } } } - if ( success ) { + if (success) { - /* process the write */ - data[target_index].ver = new_ver_num; + /* process the write */ + data[target_index].ver = new_ver_num; data[target_index].valid = TRUE; /* and update the counters */ - total_writes++; + total_writes++; (data[target_index].writes)++; #if DO_WRITE_REQ_ACK @@ -1836,84 +1714,73 @@ serve_write_request(struct mssg_t * mssg_ptr) reply.dest = mssg_ptr->src; reply.mssg_num = -1; /* set by send function */ reply.base_addr = data[target_index].base_addr; - reply.len = data[target_index].len; - reply.ver = data[target_index].ver; - reply.count = 0; - reply.magic = MSSG_MAGIC; + H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t); + reply.ver = data[target_index].ver; + reply.count = 0; + reply.magic = MSSG_MAGIC; - /* and send it */ + /* and send it */ success = send_mssg(&reply, TRUE); #endif /* DO_WRITE_REQ_ACK */ - } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { - HDfprintf(stdout, "%d write 0x%llx. len = %d. 
ver = %d.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (int)(data[target_index].len), + HDfprintf(stdout, "%d write 0x%llx. len = %d. ver = %d.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (int)(data[target_index].len), (int)(data[target_index].ver)); + } + else { - } else { - - HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (int)(data[target_index].len), + HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (int)(data[target_index].len), (int)(data[target_index].ver)); - } - } + } - return(success); + return (success); } /* serve_write_request() */ - /***************************************************************************** * - * Function: serve_total_writes_request() + * Function: serve_total_writes_request() * - * Purpose: Serve a request for the total number of writes recorded since - * the last reset. + * Purpose: Serve a request for the total number of writes recorded since + * the last reset. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends - * the current value of the total_writes global variable to - * the requesting process. + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it sends + * the current value of the total_writes global variable to + * the requesting process. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 + * Programmer: JRM -- 5/5/10 * *****************************************************************************/ static hbool_t -serve_total_writes_request(struct mssg_t * mssg_ptr) +serve_total_writes_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_total_writes_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != REQ_TTL_WRITES_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { /* compose the reply message */ reply.req = REQ_TTL_WRITES_RPLY_CODE; @@ -1923,77 +1790,67 @@ serve_total_writes_request(struct mssg_t * mssg_ptr) reply.base_addr = 0; reply.len = 0; reply.ver = 0; - reply.count = total_writes; + reply.count = (unsigned)total_writes; reply.magic = MSSG_MAGIC; } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { - - if ( success ) { + if (report_mssg) { - HDfprintf(stdout, "%d request total writes %ld.\n", - (int)(mssg_ptr->src), - total_writes); + if (success) { - } else { - - HDfprintf(stdout, "%d request total writes %ld -- FAILED.\n", - (int)(mssg_ptr->src), - total_writes); + HDfprintf(stdout, "%d request total writes %d.\n", (int)(mssg_ptr->src), total_writes); + } + else { + HDfprintf(stdout, "%d request total writes %d -- FAILED.\n", (int)(mssg_ptr->src), total_writes); } - } + } - return(success); + return (success); } /* serve_total_writes_request() */ - 
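The server side of the total-writes query is above; a client performs the mirror-image round trip. A sketch of the requesting side, reconstructed from the fields the handler checks (the helper name is illustrative, and the body assumes the surrounding t_cache.c globals and helpers):

    /* Ask the server for the current total-writes count.
     * Returns TRUE and stores the count in *count_ptr on success.
     */
    static hbool_t
    query_total_writes(unsigned *count_ptr)
    {
        struct mssg_t mssg;

        /* compose the request */
        mssg.req       = REQ_TTL_WRITES_CODE;
        mssg.src       = world_mpi_rank;
        mssg.dest      = world_server_mpi_rank;
        mssg.mssg_num  = -1; /* set by send function */
        mssg.base_addr = 0;
        mssg.len       = 0;
        mssg.ver       = 0;
        mssg.count     = 0;
        mssg.magic     = MSSG_MAGIC;

        if (!send_mssg(&mssg, FALSE))
            return FALSE;

        /* wait for the reply and sanity check it */
        if (!recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE))
            return FALSE;

        if ((mssg.req != REQ_TTL_WRITES_RPLY_CODE) || (mssg.magic != MSSG_MAGIC))
            return FALSE;

        *count_ptr = mssg.count;
        return TRUE;
    }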
/***************************************************************************** * - * Function: serve_total_reads_request() + * Function: serve_total_reads_request() * - * Purpose: Serve a request for the total number of reads recorded since - * the last reset. + * Purpose: Serve a request for the total number of reads recorded since + * the last reset. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends - * the current value of the total_reads global variable to - * the requesting process. + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it sends + * the current value of the total_reads global variable to + * the requesting process. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 + * Programmer: JRM -- 5/5/10 * *****************************************************************************/ static hbool_t -serve_total_reads_request(struct mssg_t * mssg_ptr) +serve_total_reads_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_total_reads_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != REQ_TTL_READS_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_TTL_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { /* compose the reply message */ reply.req = REQ_TTL_READS_RPLY_CODE; @@ -2003,92 +1860,83 @@ serve_total_reads_request(struct mssg_t * mssg_ptr) reply.base_addr = 0; reply.len = 0; reply.ver = 0; - reply.count = total_reads; + reply.count = (unsigned)total_reads; reply.magic = MSSG_MAGIC; } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { - HDfprintf(stdout, "%d request total reads %ld.\n", - (int)(mssg_ptr->src), - total_reads); - - } else { - - HDfprintf(stdout, "%d request total reads %ld -- FAILED.\n", - (int)(mssg_ptr->src), - total_reads); + HDfprintf(stdout, "%d request total reads %d.\n", (int)(mssg_ptr->src), total_reads); + } + else { + HDfprintf(stdout, "%d request total reads %d -- FAILED.\n", (int)(mssg_ptr->src), total_reads); } - } + } - return(success); + return (success); } /* serve_total_reads_request() */ - /***************************************************************************** * - * Function: serve_entry_writes_request() + * Function: serve_entry_writes_request() * - * Purpose: Serve an entry writes request. + * Purpose: Serve an entry writes request. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends - * the number of times that the indicated datum has been - * written since the last counter reset to the requesting - * process. + * The function accepts a pointer to an instance of struct + * mssg_t as input. If all sanity checks pass, it sends + * the number of times that the indicated datum has been + * written since the last counter reset to the requesting + * process. 
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 + * Programmer: JRM -- 5/5/10 * *****************************************************************************/ static hbool_t -serve_entry_writes_request(struct mssg_t * mssg_ptr) +serve_entry_writes_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_entry_writes_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; - int target_index; - haddr_t target_addr; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; + int target_index; + haddr_t target_addr; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != REQ_ENTRY_WRITES_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_WRITES_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { - target_addr = mssg_ptr->base_addr; + target_addr = mssg_ptr->base_addr; target_index = addr_to_datum_index(target_addr); - if ( target_index < 0 ) { + if (target_index < 0) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + if (verbose) { + HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__, + target_addr); } - } else { + } + else { /* compose the reply message */ reply.req = REQ_ENTRY_WRITES_RPLY_CODE; @@ -2098,95 +1946,86 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr) reply.base_addr = target_addr; reply.len = 0; reply.ver = 0; - reply.count = data[target_index].writes; + reply.count = (unsigned)data[target_index].writes; reply.magic = MSSG_MAGIC; } } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { - HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (long)(data[target_index].writes)); - - } else { - - HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (long)(data[target_index].writes)); + HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (long)(data[target_index].writes)); + } + else { + HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (long)(data[target_index].writes)); } - } + } - return(success); + return (success); } /* serve_entry_writes_request() */ - /***************************************************************************** * - * Function: serve_entry_reads_request() + * Function: serve_entry_reads_request() * - * Purpose: Serve an entry reads request. + * Purpose: Serve an entry reads request. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it sends - * the number of times that the indicated datum has been - * read since the last counter reset to the requesting - * process. + * The function accepts a pointer to an instance of struct + * mssg_t as input. 
If all sanity checks pass, it sends + * the number of times that the indicated datum has been + * read since the last counter reset to the requesting + * process. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 + * Programmer: JRM -- 5/5/10 * *****************************************************************************/ static hbool_t -serve_entry_reads_request(struct mssg_t * mssg_ptr) +serve_entry_reads_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_entry_reads_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; - int target_index; - haddr_t target_addr; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; + int target_index; + haddr_t target_addr; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != REQ_ENTRY_READS_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_ENTRY_READS_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { - target_addr = mssg_ptr->base_addr; + target_addr = mssg_ptr->base_addr; target_index = addr_to_datum_index(target_addr); - if ( target_index < 0 ) { + if (target_index < 0) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: addr lookup failed for %a.\n", - world_mpi_rank, fcn_name, target_addr); + if (verbose) { + HDfprintf(stdout, "%d:%s: addr lookup failed for %" PRIuHADDR ".\n", world_mpi_rank, __func__, + target_addr); } - } else { + } + else { /* compose the reply message */ reply.req = REQ_ENTRY_READS_RPLY_CODE; @@ -2196,84 +2035,74 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr) reply.base_addr = target_addr; reply.len = 0; reply.ver = 0; - reply.count = (long)(data[target_index].reads); + reply.count = (unsigned)(data[target_index].reads); reply.magic = MSSG_MAGIC; } } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { - - if ( success ) { + if (report_mssg) { - HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (long)(data[target_index].reads)); + if (success) { - } else { - - HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n", - (int)(mssg_ptr->src), - (long long)(data[target_index].base_addr), - (long)(data[target_index].reads)); + HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (long)(data[target_index].reads)); + } + else { + HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n", (int)(mssg_ptr->src), + (long long)(data[target_index].base_addr), (long)(data[target_index].reads)); } - } + } - return(success); + return (success); } /* serve_entry_reads_request() */ - /***************************************************************************** * - * Function: serve_rw_count_reset_request() + * Function: serve_rw_count_reset_request() * - * Purpose: Serve read/write count reset request. + * Purpose: Serve read/write count reset request. * - * The function accepts a pointer to an instance of struct - * mssg_t as input. If all sanity checks pass, it resets the - * read/write counters, and sends a confirmation message to - * the calling process. 
+ * The function accepts a pointer to an instance of struct + mssg_t as input. If all sanity checks pass, it resets the + read/write counters, and sends a confirmation message to + the calling process. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/5/10 + * Programmer: JRM -- 5/5/10 * *****************************************************************************/ static hbool_t -serve_rw_count_reset_request(struct mssg_t * mssg_ptr) +serve_rw_count_reset_request(struct mssg_t *mssg_ptr) { - const char * fcn_name = "serve_rw_count_reset_request()"; - hbool_t report_mssg = FALSE; - hbool_t success = TRUE; + hbool_t report_mssg = FALSE; + hbool_t success = TRUE; struct mssg_t reply; - if ( ( mssg_ptr == NULL ) || - ( mssg_ptr->req != REQ_RW_COUNT_RESET_CODE ) || - ( mssg_ptr->magic != MSSG_MAGIC ) ) { + if ((mssg_ptr == NULL) || (mssg_ptr->req != REQ_RW_COUNT_RESET_CODE) || (mssg_ptr->magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad mssg on entry.\n", world_mpi_rank, __func__); } } - if ( success ) { + if (success) { success = reset_server_counters(); - } + } - if ( success ) { + if (success) { /* compose the reply message */ reply.req = REQ_RW_COUNT_RESET_RPLY_CODE; @@ -2287,522 +2116,677 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr) reply.magic = MSSG_MAGIC; } - if ( success ) { + if (success) { success = send_mssg(&reply, TRUE); } - if ( report_mssg ) { + if (report_mssg) { - if ( success ) { + if (success) { - HDfprintf(stdout, "%d request R/W counter reset.\n", - (int)(mssg_ptr->src)); - - } else { - - HDfprintf(stdout, "%d request R/w counter reset FAILED.\n", - (int)(mssg_ptr->src)); + HDfprintf(stdout, "%d request R/W counter reset.\n", (int)(mssg_ptr->src)); + } + else { + HDfprintf(stdout, "%d request R/W counter reset FAILED.\n", (int)(mssg_ptr->src)); } - } + } - return(success); + return (success); } /* serve_rw_count_reset_request() */ - /*****************************************************************************/ /**************************** Call back functions ****************************/ /*****************************************************************************/ - /*------------------------------------------------------------------------- - * Function: clear_datum + * Function: datum_get_initial_load_size * - * Purpose: Mark the datum as clean and destroy it if requested. - * Do not write it to the server, or increment the version.
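At this point the diff turns from the server functions to the cache client callbacks: clear_datum/flush_datum/load_datum/size_datum give way to the version 3 set (datum_get_initial_load_size, datum_deserialize, datum_image_len, datum_serialize, datum_notify, datum_free_icr), whose headers and bodies follow. For orientation, a sketch of how such a callback set is typically wired into the entry class table; the real types[] initializer is outside this hunk and the H5C_class_t field list varies between HDF5 versions, so the member order below is an assumption:

    /* Hypothetical registration of the new callbacks (would need the
     * library-private headers, e.g. H5Cprivate.h, to compile).
     */
    static const H5C_class_t types_sketch[1] = {{
        DATUM_ENTRY_TYPE,            /* id                           */
        "datum",                     /* name                         */
        H5FD_MEM_DEFAULT,            /* mem_type (assumed)           */
        H5C__CLASS_NO_FLAGS_SET,     /* flags (assumed)              */
        datum_get_initial_load_size, /* get_initial_load_size        */
        NULL,                        /* get_final_load_size (unused) */
        NULL,                        /* verify_chksum (unused)       */
        datum_deserialize,           /* deserialize                  */
        datum_image_len,             /* image_len                    */
        NULL,                        /* pre_serialize (unused)       */
        datum_serialize,             /* serialize                    */
        datum_notify,                /* notify                       */
        datum_free_icr,              /* free_icr                     */
        NULL,                        /* fsf_size (unused)            */
    }};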
+ * Purpose: Query the image size for an entry before deserializing it * - * Return: SUCCEED + * Return: SUCCEED * - * Programmer: John Mainzer - * 12/29/05 + * Programmer: Quincey Koziol + * 5/18/10 * *------------------------------------------------------------------------- */ static herr_t -clear_datum(H5F_t * f, - void * thing, - hbool_t dest) +datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr) { - int idx; - struct datum * entry_ptr; + haddr_t addr = *(haddr_t *)udata_ptr; + int idx; + struct datum *entry_ptr; - HDassert( thing ); + HDassert(udata_ptr); + HDassert(image_len_ptr); - entry_ptr = (struct datum *)thing; - - idx = addr_to_datum_index(entry_ptr->base_addr); + idx = addr_to_datum_index(addr); - HDassert( idx >= 0 ); - HDassert( idx < NUM_DATA_ENTRIES ); - HDassert( idx < virt_num_data_entries ); - HDassert( &(data[idx]) == entry_ptr ); + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); - HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); - HDassert( ( entry_ptr->header.size == entry_ptr->len ) || - ( entry_ptr->header.size == entry_ptr->local_len ) ); + entry_ptr = &(data[idx]); - HDassert( entry_ptr->header.is_dirty == entry_ptr->dirty ); + HDassert(addr == entry_ptr->base_addr); + HDassert(!entry_ptr->global_pinned); + HDassert(!entry_ptr->local_pinned); - if ( entry_ptr->header.is_dirty ) { + if (callbacks_verbose) { - entry_ptr->cleared = TRUE; + HDfprintf(stdout, "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank, + idx, (long)addr, (int)entry_ptr->local_len); + fflush(stdout); } - entry_ptr->header.is_dirty = FALSE; - entry_ptr->dirty = FALSE; + /* Set image length size */ + *image_len_ptr = entry_ptr->local_len; - if ( dest ) { + return (SUCCEED); +} /* get_initial_load_size() */ + +/*------------------------------------------------------------------------- + * Function: datum_deserialize + * + * Purpose: deserialize the entry. + * + * Return: void * (pointer to the in core representation of the entry) + * + * Programmer: John Mainzer + * 9/20/07 + * + *------------------------------------------------------------------------- + */ +static void * +datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr, H5_ATTR_UNUSED size_t len, void *udata_ptr, + hbool_t *dirty_ptr) +{ + haddr_t addr = *(haddr_t *)udata_ptr; + hbool_t success = TRUE; + int idx; + struct datum *entry_ptr = NULL; - destroy_datum(f, thing); + HDassert(image_ptr != NULL); + idx = addr_to_datum_index(addr); + + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); + + entry_ptr = &(data[idx]); + + HDassert(addr == entry_ptr->base_addr); + HDassert(!entry_ptr->global_pinned); + HDassert(!entry_ptr->local_pinned); + + HDassert(dirty_ptr); + + if (callbacks_verbose) { + + HDfprintf(stdout, "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n", + world_mpi_rank, idx, (long)addr, (int)len, (int)(entry_ptr->header.is_dirty)); + fflush(stdout); } - datum_clears++; + *dirty_ptr = FALSE; - if ( entry_ptr->header.is_pinned ) { + if (!success) { - datum_pinned_clears++; - HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned ); + entry_ptr = NULL; } - return(SUCCEED); + return (entry_ptr); -} /* clear_datum() */ +} /* deserialize() */ - /*------------------------------------------------------------------------- - * Function: destroy_datum() + * Function: datum_image_len * - * Purpose: Destroy the entry. 
At present, this means do nothing other - * than verify that the entry is clean. In particular, do not - * write it to the server process. + * Purpose: Return the real (and possibly reduced) length of the image. + * The helper functions verify that the correct version of + * deserialize is being called, and then call deserialize + * proper. * - * Return: SUCCEED + * Return: SUCCEED * - * Programmer: John Mainzer - * 12/29/05 + * Programmer: John Mainzer + * 9/19/07 * *------------------------------------------------------------------------- */ static herr_t -destroy_datum(H5F_t UNUSED * f, - void * thing) +datum_image_len(const void *thing, size_t *image_len) { - int idx; - struct datum * entry_ptr; + int idx; + const struct datum *entry_ptr; - HDassert( thing ); + HDassert(thing); + HDassert(image_len); - entry_ptr = (struct datum *)thing; + entry_ptr = (const struct datum *)thing; idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( idx >= 0 ); - HDassert( idx < NUM_DATA_ENTRIES ); - HDassert( idx < virt_num_data_entries ); - HDassert( &(data[idx]) == entry_ptr ); + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); + HDassert(&(data[idx]) == entry_ptr); + HDassert(entry_ptr->local_len > 0); + HDassert(entry_ptr->local_len <= entry_ptr->len); - HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); - HDassert( ( entry_ptr->header.size == entry_ptr->len ) || - ( entry_ptr->header.size == entry_ptr->local_len ) ); - - HDassert( !(entry_ptr->dirty) ); - HDassert( !(entry_ptr->header.is_dirty) ); - HDassert( !(entry_ptr->global_pinned) ); - HDassert( !(entry_ptr->local_pinned) ); - HDassert( !(entry_ptr->header.is_pinned) ); + if (callbacks_verbose) { + HDfprintf(stdout, "%d: image_len() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank, idx, + (long)(entry_ptr->base_addr), (int)(entry_ptr->local_len)); + fflush(stdout); + } - datum_destroys++; + HDassert(entry_ptr->header.addr == entry_ptr->base_addr); - return(SUCCEED); + *image_len = entry_ptr->local_len; -} /* destroy_datum() */ + return (SUCCEED); +} /* datum_image_len() */ - /*------------------------------------------------------------------------- - * Function: flush_datum + * Function: datum_serialize * - * Purpose: Flush the entry to the server process and mark it as clean. - * Then destroy the entry if requested. + * Purpose: Serialize the supplied entry. * - * Return: SUCCEED if successful, and FAIL otherwise. + * Return: SUCCEED if successful, FAIL otherwise. 
* - * Programmer: John Mainzer - * 12/29/05 + * Programmer: John Mainzer + * 10/30/07 * *------------------------------------------------------------------------- */ static herr_t -flush_datum(H5F_t *f, - hid_t UNUSED dxpl_id, - hbool_t dest, - haddr_t UNUSED addr, - void *thing) +datum_serialize(const H5F_t *f, void H5_ATTR_NDEBUG_UNUSED *image_ptr, size_t len, void *thing_ptr) { - const char * fcn_name = "flush_datum()"; - hbool_t was_dirty = FALSE; - herr_t ret_value = SUCCEED; - int idx; - struct datum * entry_ptr; - struct mssg_t mssg; - H5C_t * cache_ptr; - struct H5AC_aux_t * aux_ptr; - - HDassert( thing ); + herr_t ret_value = SUCCEED; + int idx; + struct datum *entry_ptr; + struct H5AC_aux_t *aux_ptr; - entry_ptr = (struct datum *)thing; + HDassert(thing_ptr); + HDassert(image_ptr); - HDassert( f ); - HDassert( f->shared ); - HDassert( f->shared->cache ); - - cache_ptr = f->shared->cache; + entry_ptr = (struct datum *)thing_ptr; - HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC ); - HDassert( cache_ptr->aux_ptr ); + HDassert(f); + HDassert(f->shared); + HDassert(f->shared->cache); + HDassert(f->shared->cache->magic == H5C__H5C_T_MAGIC); + HDassert(f->shared->cache->aux_ptr); aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr); - HDassert( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ); + HDassert(aux_ptr); + HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); + + entry_ptr->aux_ptr = aux_ptr; idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( idx >= 0 ); - HDassert( idx < NUM_DATA_ENTRIES ); - HDassert( idx < virt_num_data_entries ); - HDassert( &(data[idx]) == entry_ptr ); + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); + HDassert(&(data[idx]) == entry_ptr); + + if (callbacks_verbose) { + + HDfprintf(stdout, "%d: serialize() idx = %d, addr = %ld, len = %d.\n", world_mpi_rank, idx, + (long)entry_ptr->header.addr, (int)len); + fflush(stdout); + } + + HDassert(entry_ptr->header.addr == entry_ptr->base_addr); + HDassert((entry_ptr->header.size == entry_ptr->len) || (entry_ptr->header.size == entry_ptr->local_len)); + + HDassert(entry_ptr->header.is_dirty == entry_ptr->dirty); + + datum_flushes++; + + if (entry_ptr->header.is_pinned) { + + datum_pinned_flushes++; + HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned); + } + + return (ret_value); + +} /* datum_serialize() */ + +/*------------------------------------------------------------------------- + * Function: datum_notify + * + * Purpose: Do the communication with the server we used to do in the + * flush and load callbacks in the version 2 cache. 
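These hunks also swap plain narrowing assignments (e.g. reply.len = data[target_index].len) for H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t). Conceptually that macro is a range-checked cast; the stand-in below is a simplification for the unsigned-destination, non-negative-source case (the real macro lives in H5private.h, takes the source type as its fourth argument, and its checks typically compile away in NDEBUG builds):

    #include <assert.h>

    /* EXAMPLE_ only -- a conceptual model, not the HDF5 macro. */
    #define EXAMPLE_CHECKED_ASSIGN(dst, dsttype, src)                    \
        do {                                                             \
            /* largest value representable in the unsigned dsttype */    \
            assert((src) <= (dsttype)~(dsttype)0);                       \
            (dst) = (dsttype)(src);                                      \
        } while (0)

    /* usage, mirroring the hunks above:
     *     EXAMPLE_CHECKED_ASSIGN(reply.len, unsigned, data[idx].len);
     */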
+ * + * Return: SUCCEED + * + * Programmer: John Mainzer + * 1/12/15 + * + *------------------------------------------------------------------------- + */ +static herr_t +datum_notify(H5C_notify_action_t action, void *thing) +{ + hbool_t was_dirty = FALSE; + herr_t ret_value = SUCCEED; + struct datum *entry_ptr; + struct H5AC_aux_t *aux_ptr; + struct mssg_t mssg; + int idx; + + HDassert(thing); - HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); - HDassert( ( entry_ptr->header.size == entry_ptr->len ) || - ( entry_ptr->header.size == entry_ptr->local_len ) ); + entry_ptr = (struct datum *)thing; + + idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( entry_ptr->header.is_dirty == entry_ptr->dirty ); + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); + HDassert(&(data[idx]) == entry_ptr); - if ( ( file_mpi_rank != 0 ) && - ( entry_ptr->dirty ) && - ( aux_ptr->metadata_write_strategy == - H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY ) ) { + if (callbacks_verbose) { - ret_value = FAIL; - HDfprintf(stdout, - "%d:%s: Flushed dirty entry from non-zero file process.", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d: notify() action = %d, idx = %d, addr = %ld.\n", world_mpi_rank, (int)action, + idx, (long)entry_ptr->header.addr); + fflush(stdout); } - if ( ret_value == SUCCEED ) { + HDassert(entry_ptr->header.addr == entry_ptr->base_addr); + /* Skip this check when the entry is being dirtied, since the resize + * operation sends the message before the len/local_len is updated + * (after the resize operation completes successfully) (QAK - 2016/10/19) + */ + if (H5AC_NOTIFY_ACTION_ENTRY_DIRTIED != action) + HDassert((entry_ptr->header.size == entry_ptr->len) || + (entry_ptr->header.size == entry_ptr->local_len)); + + switch (action) { + case H5AC_NOTIFY_ACTION_AFTER_INSERT: + if (callbacks_verbose) { + + HDfprintf(stdout, "%d: notify() action = insert, idx = %d, addr = %ld.\n", world_mpi_rank, + idx, (long)entry_ptr->header.addr); + fflush(stdout); + } + /* do nothing */ + break; - if ( entry_ptr->header.is_dirty ) { + case H5AC_NOTIFY_ACTION_AFTER_LOAD: + if (callbacks_verbose) { - was_dirty = TRUE; /* so we will receive the ack if requested */ + HDfprintf(stdout, "%d: notify() action = load, idx = %d, addr = %ld.\n", world_mpi_rank, idx, + (long)entry_ptr->header.addr); + fflush(stdout); + } - /* compose the message */ - mssg.req = WRITE_REQ_CODE; + /* compose the read message */ + mssg.req = READ_REQ_CODE; mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ mssg.base_addr = entry_ptr->base_addr; - mssg.len = entry_ptr->len; - mssg.ver = entry_ptr->ver; - mssg.count = 0; - mssg.magic = MSSG_MAGIC; + H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t); + mssg.ver = 0; /* bogus -- should be corrected by server */ + mssg.count = 0; /* not used */ + mssg.magic = MSSG_MAGIC; - if ( ! 
send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; ret_value = FAIL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } - else - { - entry_ptr->header.is_dirty = FALSE; - entry_ptr->dirty = FALSE; - entry_ptr->flushed = TRUE; + + if (ret_value == SUCCEED) { + + if (!recv_mssg(&mssg, READ_REQ_REPLY_CODE)) { + + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); + } + } } - } - } -#if DO_WRITE_REQ_ACK + if (ret_value == SUCCEED) { - if ( ( ret_value == SUCCEED ) && ( was_dirty ) ) { + if ((mssg.req != READ_REQ_REPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != entry_ptr->base_addr) || + (mssg.len != entry_ptr->len) || (mssg.ver < entry_ptr->ver) || + (mssg.magic != MSSG_MAGIC)) { - if ( ! recv_mssg(&mssg, WRITE_REQ_ACK_CODE) ) { + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, __func__); + } - nerrors++; - ret_value = FAIL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); - } - } else if ( ( mssg.req != WRITE_REQ_ACK_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != entry_ptr->base_addr ) || - ( mssg.len != entry_ptr->len ) || - ( mssg.ver != entry_ptr->ver ) || - ( mssg.magic != MSSG_MAGIC ) ) { +#if 0 /* This has been useful debugging code -- keep it for now. */ + if ( mssg.req != READ_REQ_REPLY_CODE ) { - nerrors++; - ret_value = FAIL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", - world_mpi_rank, fcn_name); - } - } - } + HDfprintf(stdout, + "%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n", + world_mpi_rank, __func__); + HDfprintf(stdout, "%d:%s: mssg.req = %d.\n", + world_mpi_rank, __func__, (int)(mssg.req)); + } -#endif /* DO_WRITE_REQ_ACK */ + if ( mssg.src != world_server_mpi_rank ) { - if ( ret_value == SUCCEED ) { + HDfprintf(stdout, + "%d:%s: mssg.src != world_server_mpi_rank.\n", + world_mpi_rank, __func__); + } - if ( dest ) { + if ( mssg.dest != world_mpi_rank ) { - ret_value = destroy_datum(f, thing); - } - } + HDfprintf(stdout, + "%d:%s: mssg.dest != world_mpi_rank.\n", + world_mpi_rank, __func__); + } - datum_flushes++; + if ( mssg.base_addr != entry_ptr->base_addr ) { - if ( entry_ptr->header.is_pinned ) { + HDfprintf(stdout, + "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n", + world_mpi_rank, __func__); + HDfprintf(stdout, "%d:%s: mssg.base_addr = %" PRIuHADDR ".\n", + world_mpi_rank, __func__, mssg.base_addr); + HDfprintf(stdout, + "%d:%s: entry_ptr->base_addr = %" PRIuHADDR ".\n", + world_mpi_rank, __func__, + entry_ptr->base_addr); + } - datum_pinned_flushes++; - HDassert( entry_ptr->global_pinned || entry_ptr->local_pinned ); - } + if ( mssg.len != entry_ptr->len ) { - return(ret_value); + HDfprintf(stdout, + "%d:%s: mssg.len != entry_ptr->len.\n", + world_mpi_rank, __func__); + HDfprintf(stdout, "%d:%s: mssg.len = %" PRIuHADDR ".\n", + world_mpi_rank, __func__, mssg.len); + } -} /* flush_datum() */ + if ( mssg.ver < entry_ptr->ver ) { -/*------------------------------------------------------------------------- - * Function: load_datum - * - * Purpose: Read the requested entry from the server and mark it as - * clean. 
- * - * Return: SUCCEED if successful, FAIL otherwise. - * - * Programmer: John Mainzer - * 12/29/05 - * - * Modifications: - * - *------------------------------------------------------------------------- - */ + HDfprintf(stdout, + "%d:%s: mssg.ver < entry_ptr->ver.\n", + world_mpi_rank, __func__); + } -static void * -load_datum(H5F_t UNUSED *f, - hid_t UNUSED dxpl_id, - haddr_t addr, - void UNUSED *udata) -{ - const char * fcn_name = "load_datum()"; - hbool_t success = TRUE; - int idx; - struct datum * entry_ptr = NULL; - struct mssg_t mssg; + if ( mssg.magic != MSSG_MAGIC ) { - idx = addr_to_datum_index(addr); + HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n", + world_mpi_rank, __func__); + } +#endif /* JRM */ + } + else { - HDassert( idx >= 0 ); - HDassert( idx < NUM_DATA_ENTRIES ); - HDassert( idx < virt_num_data_entries ); + entry_ptr->ver = mssg.ver; + entry_ptr->dirty = FALSE; + datum_loads++; + } + } + break; - entry_ptr = &(data[idx]); + case H5C_NOTIFY_ACTION_AFTER_FLUSH: + if (callbacks_verbose) { - HDassert( addr == entry_ptr->base_addr ); - HDassert( ! entry_ptr->global_pinned ); - HDassert( ! entry_ptr->local_pinned ); + HDfprintf(stdout, "%d: notify() action = flush, idx = %d, addr = %ld.\n", world_mpi_rank, idx, + (long)entry_ptr->header.addr); + fflush(stdout); + } - /* compose the read message */ - mssg.req = READ_REQ_CODE; - mssg.src = world_mpi_rank; - mssg.dest = world_server_mpi_rank; - mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = entry_ptr->base_addr; - mssg.len = entry_ptr->len; - mssg.ver = 0; /* bogus -- should be corrected by server */ - mssg.count = 0; /* not used */ - mssg.magic = MSSG_MAGIC; + HDassert(entry_ptr->aux_ptr); + HDassert(entry_ptr->aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); + aux_ptr = entry_ptr->aux_ptr; + entry_ptr->aux_ptr = NULL; - if ( ! send_mssg(&mssg, FALSE) ) { + HDassert(entry_ptr->header.is_dirty); /* JRM */ - nerrors++; - success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); - } - } + if ((file_mpi_rank != 0) && (entry_ptr->dirty) && + (aux_ptr->metadata_write_strategy == H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY)) { - if ( success ) { + ret_value = FAIL; + HDfprintf(stdout, "%d:%s: Flushed dirty entry from non-zero file process.", world_mpi_rank, + __func__); + } - if ( ! 
recv_mssg(&mssg, READ_REQ_REPLY_CODE) ) { + if (ret_value == SUCCEED) { - nerrors++; - success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (entry_ptr->header.is_dirty) { + + was_dirty = TRUE; /* so we will receive the ack + * if requested + */ + + /* compose the message */ + mssg.req = WRITE_REQ_CODE; + mssg.src = world_mpi_rank; + mssg.dest = world_server_mpi_rank; + mssg.mssg_num = -1; /* set by send function */ + mssg.base_addr = entry_ptr->base_addr; + H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t); + mssg.ver = entry_ptr->ver; + mssg.count = 0; + mssg.magic = MSSG_MAGIC; + + if (!send_mssg(&mssg, FALSE)) { + + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); + } + } + else { + entry_ptr->dirty = FALSE; + entry_ptr->flushed = TRUE; + } + } } - } - } - if ( success ) { +#if DO_WRITE_REQ_ACK - if ( ( mssg.req != READ_REQ_REPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != entry_ptr->base_addr ) || - ( mssg.len != entry_ptr->len ) || - ( mssg.ver < entry_ptr->ver ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if ((ret_value == SUCCEED) && (was_dirty)) { - nerrors++; - success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", - world_mpi_rank, fcn_name); + if (!recv_mssg(&mssg, WRITE_REQ_ACK_CODE)) { + + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); + } + } + else if ((mssg.req != WRITE_REQ_ACK_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != entry_ptr->base_addr) || + (mssg.len != entry_ptr->len) || (mssg.ver != entry_ptr->ver) || + (mssg.magic != MSSG_MAGIC)) { + + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, __func__); + } + } } -#if 0 /* This has been useful debugging code -- keep it for now. 
*/ - if ( mssg.req != READ_REQ_REPLY_CODE ) { - HDfprintf(stdout, "%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n", - world_mpi_rank, fcn_name); - HDfprintf(stdout, "%d:%s: mssg.req = %d.\n", - world_mpi_rank, fcn_name, (int)(mssg.req)); - } +#endif /* DO_WRITE_REQ_ACK */ + + datum_flushes++; - if ( mssg.src != world_server_mpi_rank ) { + if (entry_ptr->header.is_pinned) { - HDfprintf(stdout, "%d:%s: mssg.src != world_server_mpi_rank.\n", - world_mpi_rank, fcn_name); - } + datum_pinned_flushes++; + HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned); + } + break; - if ( mssg.dest != world_mpi_rank ) { + case H5AC_NOTIFY_ACTION_BEFORE_EVICT: + if (callbacks_verbose) { - HDfprintf(stdout, "%d:%s: mssg.dest != world_mpi_rank.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d: notify() action = evict, idx = %d, addr = %ld.\n", world_mpi_rank, idx, + (long)entry_ptr->header.addr); + fflush(stdout); } - if ( mssg.base_addr != entry_ptr->base_addr ) { + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED: + if (callbacks_verbose) { - HDfprintf(stdout, - "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n", - world_mpi_rank, fcn_name); - HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n", - world_mpi_rank, fcn_name, mssg.base_addr); - HDfprintf(stdout, "%d:%s: entry_ptr->base_addr = %a.\n", - world_mpi_rank, fcn_name, entry_ptr->base_addr); + HDfprintf(stdout, "%d: notify() action = entry dirty, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); } - if ( mssg.len != entry_ptr->len ) { + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_ENTRY_CLEANED: + if (callbacks_verbose) { - HDfprintf(stdout, "%d:%s: mssg.len != entry_ptr->len.\n", - world_mpi_rank, fcn_name); - HDfprintf(stdout, "%d:%s: mssg.len = %a.\n", - world_mpi_rank, fcn_name, mssg.len); + HDfprintf(stdout, "%d: notify() action = entry clean, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); } - if ( mssg.ver < entry_ptr->ver ) { + entry_ptr->cleared = TRUE; + entry_ptr->dirty = FALSE; + + datum_clears++; + + if (entry_ptr->header.is_pinned) { + datum_pinned_clears++; + HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned); + } /* end if */ + + break; - HDfprintf(stdout, "%d:%s: mssg.ver < entry_ptr->ver.\n", - world_mpi_rank, fcn_name); + case H5AC_NOTIFY_ACTION_CHILD_DIRTIED: + if (callbacks_verbose) { + + HDfprintf(stdout, "%d: notify() action = child entry dirty, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); } - if ( mssg.magic != MSSG_MAGIC ) { + /* do nothing */ + break; + + case H5AC_NOTIFY_ACTION_CHILD_CLEANED: + if (callbacks_verbose) { - HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d: notify() action = child entry clean, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); } -#endif /* JRM */ - } else { - entry_ptr->ver = mssg.ver; - entry_ptr->header.is_dirty = FALSE; - entry_ptr->dirty = FALSE; - } - } + /* do nothing */ + break; - if ( ! 
success ) { + case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED: + if (callbacks_verbose) { - entry_ptr = NULL; + HDfprintf(stdout, "%d: notify() action = child entry unserialized, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); + } - } + /* do nothing */ + break; - datum_loads++; + case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED: + if (callbacks_verbose) { - return(entry_ptr); + HDfprintf(stdout, "%d: notify() action = child entry serialized, idx = %d, addr = %ld.\n", + world_mpi_rank, idx, (long)entry_ptr->header.addr); + fflush(stdout); + } -} /* load_datum() */ + /* do nothing */ + break; + + default: + nerrors++; + ret_value = FAIL; + if (verbose) { + HDfprintf(stdout, "%d:%s: Unknown notify action.\n", world_mpi_rank, __func__); + } + break; + } + + return (ret_value); + +} /* datum_notify() */ /*------------------------------------------------------------------------- - * Function: size_datum - * - * Purpose: Get the size of the specified entry. Just look at the - * local copy, as size can't change. + * Function: datum_free_icr * - * Return: SUCCEED + * Purpose: Nominally, this callback is supposed to free the + * in core representation of the entry. * - * Programmer: John Mainzer - * 6/10/04 + * In the context of this test bed, we use it to do + * do all the processing we used to do on a destroy. * - * Modifications: + * Return: SUCCEED * - * JRM -- 7/11/06 - * Modified function to return the local_len field instead - * of the len field. These two fields usually contain the - * same value, but if the size of an entry is changed, we - * store the altered size in local_len without changing - * len. Note that local_len must be positive, and may - * not exceed len. + * Programmer: John Mainzer + * 9/19/07 * *------------------------------------------------------------------------- */ - static herr_t -size_datum(H5F_t UNUSED * f, - void * thing, - size_t * size_ptr) +datum_free_icr(void *thing) { - int idx; - struct datum * entry_ptr; + int idx; + struct datum *entry_ptr; - HDassert( thing ); - HDassert( size_ptr ); + HDassert(thing); entry_ptr = (struct datum *)thing; idx = addr_to_datum_index(entry_ptr->base_addr); - HDassert( idx >= 0 ); - HDassert( idx < NUM_DATA_ENTRIES ); - HDassert( idx < virt_num_data_entries ); - HDassert( &(data[idx]) == entry_ptr ); - HDassert( entry_ptr->local_len > 0 ); - HDassert( entry_ptr->local_len <= entry_ptr->len ); + HDassert(idx >= 0); + HDassert(idx < NUM_DATA_ENTRIES); + HDassert(idx < virt_num_data_entries); + HDassert(&(data[idx]) == entry_ptr); - HDassert( entry_ptr->header.addr == entry_ptr->base_addr ); + if (callbacks_verbose) { - *size_ptr = entry_ptr->local_len; + HDfprintf(stdout, "%d: free_icr() idx = %d, dirty = %d.\n", world_mpi_rank, idx, + (int)(entry_ptr->dirty)); + fflush(stdout); + } + + HDassert(entry_ptr->header.addr == entry_ptr->base_addr); + HDassert((entry_ptr->header.size == entry_ptr->len) || (entry_ptr->header.size == entry_ptr->local_len)); - return(SUCCEED); + HDassert(!(entry_ptr->header.is_dirty)); + HDassert(!(entry_ptr->global_pinned)); + HDassert(!(entry_ptr->local_pinned)); + HDassert(!(entry_ptr->header.is_pinned)); -} /* size_datum() */ + datum_destroys++; + return (SUCCEED); +} /* datum_free_icr() */ /*****************************************************************************/ /************************** test utility functions ***************************/ @@ -2812,73 +2796,65 @@ size_datum(H5F_t UNUSED * f, * Function: expunge_entry() * * Purpose: Expunge the entry 
indicated by the type and index, mark it - * as clean, and don't increment its version number. + * as clean, and don't increment its version number. * - * Do nothing if nerrors is non-zero on entry. + * Do nothing if nerrors is non-zero on entry. * * Return: void * * Programmer: John Mainzer * 07/11/06 * - * Modifications: - * - * None. - * *****************************************************************************/ - static void -expunge_entry(H5F_t * file_ptr, - int32_t idx) +expunge_entry(H5F_t *file_ptr, int32_t idx) { - const char * fcn_name = "expunge_entry()"; - hbool_t in_cache; - herr_t result; - struct datum * entry_ptr; + hbool_t in_cache; + herr_t result; + struct datum *entry_ptr; - HDassert( file_ptr ); - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( !(entry_ptr->locked) ); - HDassert( !(entry_ptr->global_pinned) ); - HDassert( !(entry_ptr->local_pinned) ); + HDassert(!(entry_ptr->locked)); + HDassert(!(entry_ptr->global_pinned)); + HDassert(!(entry_ptr->local_pinned)); + + entry_ptr->dirty = FALSE; - if ( nerrors == 0 ) { + if (nerrors == 0) { - result = H5AC_expunge_entry(file_ptr, (hid_t)-1, &(types[0]), - entry_ptr->header.addr, H5AC__NO_FLAGS_SET); + result = H5AC_expunge_entry(file_ptr, &(types[0]), entry_ptr->header.addr, H5AC__NO_FLAGS_SET); - if ( result < 0 ) { + if (result < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n", world_mpi_rank, __func__); } } - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); - HDassert( ! ((entry_ptr->header).is_dirty) ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); + HDassert(!((entry_ptr->header).is_dirty)); - result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr, - NULL, &in_cache, NULL, NULL, NULL, NULL, NULL); + result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr, NULL, &in_cache, NULL, NULL, NULL, NULL, + NULL, NULL, NULL); - if ( result < 0 ) { + if (result < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n", world_mpi_rank, __func__); } - } else if ( in_cache ) { + } + else if (in_cache) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n", world_mpi_rank, __func__); } } } @@ -2887,14 +2863,13 @@ expunge_entry(H5F_t * file_ptr, } /* expunge_entry() */ - /***************************************************************************** * Function: insert_entry() * * Purpose: Insert the entry indicated by the type and index, mark it - * as dirty, and increment its version number. + * as dirty, and increment its version number. * - * Do nothing if nerrors is non-zero on entry. + * Do nothing if nerrors is non-zero on entry. * * Return: void * @@ -2909,96 +2884,84 @@ expunge_entry(H5F_t * file_ptr, * any pins must be global pins. 
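
The expunge-and-verify pattern in expunge_entry() above generalizes: expunge the entry, then interrogate the cache to confirm it is gone. A minimal sketch of that check, assuming the test's file_ptr, types[], data[], and nerrors globals are in scope; entry_addr is a placeholder, and the call mirrors the eleven-argument H5C_get_entry_status() form used above, passing NULL for the status fields this check does not need.

    /* Sketch: expunge the entry at entry_addr, then confirm it left the cache. */
    hbool_t in_cache   = FALSE;
    haddr_t entry_addr = data[0].base_addr; /* placeholder test entry address */

    if (H5AC_expunge_entry(file_ptr, &(types[0]), entry_addr, H5AC__NO_FLAGS_SET) < 0)
        nerrors++; /* expunge failed outright */
    else if (H5C_get_entry_status(file_ptr, entry_addr, NULL, &in_cache, NULL, NULL, NULL, NULL, NULL,
                                  NULL, NULL) < 0)
        nerrors++; /* status query failed */
    else if (in_cache)
        nerrors++; /* an expunged entry must not remain resident */
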
* *****************************************************************************/ - static void -insert_entry(H5C_t * cache_ptr, - H5F_t * file_ptr, - int32_t idx, - unsigned int flags) +insert_entry(H5C_t *cache_ptr, H5F_t *file_ptr, int32_t idx, unsigned int flags) { - const char * fcn_name = "insert_entry()"; - hbool_t insert_pinned; - herr_t result; - struct datum * entry_ptr; + hbool_t insert_pinned; + herr_t result; + struct datum *entry_ptr; - HDassert( cache_ptr ); - HDassert( file_ptr ); - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert(cache_ptr); + HDassert(file_ptr); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( !(entry_ptr->locked) ); + HDassert(!(entry_ptr->locked)); - insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0 ); + insert_pinned = ((flags & H5C__PIN_ENTRY_FLAG) != 0); - if ( nerrors == 0 ) { + if (nerrors == 0) { (entry_ptr->ver)++; entry_ptr->dirty = TRUE; - result = H5AC_insert_entry(file_ptr, H5P_DATASET_XFER_DEFAULT, &(types[0]), - entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags); + result = H5AC_insert_entry(file_ptr, &(types[0]), entry_ptr->base_addr, + (void *)(&(entry_ptr->header)), flags); - if ( ( result < 0 ) || - ( entry_ptr->header.type != &(types[0]) ) || - ( entry_ptr->len != entry_ptr->header.size ) || - ( entry_ptr->base_addr != entry_ptr->header.addr ) ) { + if ((result < 0) || (entry_ptr->header.type != &(types[0])) || + (entry_ptr->len != entry_ptr->header.size) || (entry_ptr->base_addr != entry_ptr->header.addr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n", world_mpi_rank, __func__); } } - if ( ! (entry_ptr->header.is_dirty) ) { + if (!(entry_ptr->header.is_dirty)) { - /* it is possible that we just exceeded the dirty bytes - * threshold, triggering a write of the newly inserted - * entry. Test for this, and only flag an error if this - * is not the case. - */ + /* it is possible that we just exceeded the dirty bytes + * threshold, triggering a write of the newly inserted + * entry. Test for this, and only flag an error if this + * is not the case. + */ - struct H5AC_aux_t * aux_ptr; + struct H5AC_aux_t *aux_ptr; - aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr)); + aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr)); - if ( ! ( ( aux_ptr != NULL ) && - ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) && - ( aux_ptr->dirty_bytes == 0 ) ) ) { + if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) && + (aux_ptr->dirty_bytes == 0))) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", - world_mpi_rank, fcn_name, idx, - (int)(data[idx].header.is_dirty)); - } + if (verbose) { + HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, __func__, + idx, (int)(data[idx].header.is_dirty)); + } } } - if ( insert_pinned ) { + if (insert_pinned) { - HDassert( entry_ptr->header.is_pinned ); + HDassert(entry_ptr->header.is_pinned); entry_ptr->global_pinned = TRUE; - global_pins++; - - } else { + global_pins++; + } + else { - HDassert( ! 
( entry_ptr->header.is_pinned ) ); + HDassert(!(entry_ptr->header.is_pinned)); entry_ptr->global_pinned = FALSE; - } /* HDassert( entry_ptr->header.is_dirty ); */ - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); } return; } /* insert_entry() */ - /***************************************************************************** * Function: local_pin_and_unpin_random_entries() * @@ -3012,65 +2975,54 @@ insert_entry(H5C_t * cache_ptr, * Programmer: John Mainzer * 4/12/06 * - * Modifications: - * *****************************************************************************/ - static void -local_pin_and_unpin_random_entries(H5F_t * file_ptr, - int min_idx, - int max_idx, - int min_count, - int max_count) +local_pin_and_unpin_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, int max_count) { - /* const char * fcn_name = "local_pin_and_unpin_random_entries()"; */ - if ( nerrors == 0 ) { + if (nerrors == 0) { hbool_t via_unprotect; - int count; - int i; - int idx; - - HDassert( file_ptr ); - HDassert( 0 <= min_idx ); - HDassert( min_idx < max_idx ); - HDassert( max_idx < NUM_DATA_ENTRIES ); - HDassert( max_idx < virt_num_data_entries ); - HDassert( 0 <= min_count ); - HDassert( min_count < max_count ); + int count; + int i; + int idx; + + HDassert(file_ptr); + HDassert(0 <= min_idx); + HDassert(min_idx < max_idx); + HDassert(max_idx < NUM_DATA_ENTRIES); + HDassert(max_idx < virt_num_data_entries); + HDassert(0 <= min_count); + HDassert(min_count < max_count); - count = (HDrand() % (max_count - min_count)) + min_count; + count = (HDrand() % (max_count - min_count)) + min_count; - HDassert( min_count <= count ); - HDassert( count <= max_count ); + HDassert(min_count <= count); + HDassert(count <= max_count); - for ( i = 0; i < count; i++ ) - { + for (i = 0; i < count; i++) { local_pin_random_entry(file_ptr, min_idx, max_idx); - } + } - count = (HDrand() % (max_count - min_count)) + min_count; + count = (HDrand() % (max_count - min_count)) + min_count; - HDassert( min_count <= count ); - HDassert( count <= max_count ); + HDassert(min_count <= count); + HDassert(count <= max_count); - i = 0; - idx = 0; + i = 0; + idx = 0; - while ( ( i < count ) && ( idx >= 0 ) ) - { - via_unprotect = ( (((unsigned)i) & 0x0001) == 0 ); - idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect); - i++; - } + while ((i < count) && (idx >= 0)) { + via_unprotect = ((((unsigned)i) & 0x0001) == 0); + idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect); + i++; + } } return; } /* local_pin_and_unpin_random_entries() */ - /***************************************************************************** * Function: local_pin_random_entry() * @@ -3088,28 +3040,23 @@ local_pin_and_unpin_random_entries(H5F_t * file_ptr, * *****************************************************************************/ static void -local_pin_random_entry(H5F_t * file_ptr, - int min_idx, - int max_idx) +local_pin_random_entry(H5F_t *file_ptr, int min_idx, int max_idx) { - /* const char * fcn_name = "local_pin_random_entry()"; */ int idx; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( 0 <= min_idx ); - HDassert( min_idx < max_idx ); - HDassert( max_idx < NUM_DATA_ENTRIES ); - HDassert( max_idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert(0 <= min_idx); + HDassert(min_idx < max_idx); + HDassert(max_idx < NUM_DATA_ENTRIES); + HDassert(max_idx < virt_num_data_entries); - do - { - idx = 
(HDrand() % (max_idx - min_idx)) + min_idx; - HDassert( min_idx <= idx ); - HDassert( idx <= max_idx ); - } - while ( data[idx].global_pinned || data[idx].local_pinned ); + do { + idx = (HDrand() % (max_idx - min_idx)) + min_idx; + HDassert(min_idx <= idx); + HDassert(idx <= max_idx); + } while (data[idx].global_pinned || data[idx].local_pinned); pin_entry(file_ptr, idx, FALSE, FALSE); } @@ -3118,7 +3065,6 @@ local_pin_random_entry(H5F_t * file_ptr, } /* local_pin_random_entry() */ - /***************************************************************************** * Function: local_unpin_all_entries() * @@ -3131,41 +3077,33 @@ local_pin_random_entry(H5F_t * file_ptr, * Programmer: John Mainzer * 4/12/06 * - * Modifications: - * *****************************************************************************/ - static void -local_unpin_all_entries(H5F_t * file_ptr, - hbool_t via_unprotect) +local_unpin_all_entries(H5F_t *file_ptr, hbool_t via_unprotect) { - /* const char * fcn_name = "local_unpin_all_entries()"; */ - if ( nerrors == 0 ) { + if (nerrors == 0) { int idx; - HDassert( file_ptr ); + HDassert(file_ptr); - idx = 0; + idx = 0; - while ( idx >= 0 ) - { - idx = local_unpin_next_pinned_entry(file_ptr, - idx, via_unprotect); - } + while (idx >= 0) { + idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect); + } } return; } /* local_unpin_all_entries() */ - /***************************************************************************** * Function: local_unpin_next_pinned_entry() * * Purpose: Find the next locally pinned entry after the specified - * starting point, and unpin it. + * starting point, and unpin it. * * Do nothing if nerrors is non-zero on entry. * @@ -3176,59 +3114,50 @@ local_unpin_all_entries(H5F_t * file_ptr, * Programmer: John Mainzer * 4/12/06 * - * Modifications: - * *****************************************************************************/ - static int -local_unpin_next_pinned_entry(H5F_t * file_ptr, - int start_idx, - hbool_t via_unprotect) +local_unpin_next_pinned_entry(H5F_t *file_ptr, int start_idx, hbool_t via_unprotect) { - /* const char * fcn_name = "local_unpin_next_pinned_entry()"; */ - int i = 0; + int i = 0; int idx = -1; - if ( nerrors == 0 ) { - - HDassert( file_ptr ); - HDassert( 0 <= start_idx ); - HDassert( start_idx < NUM_DATA_ENTRIES ); - HDassert( start_idx < virt_num_data_entries ); + if (nerrors == 0) { - idx = start_idx; + HDassert(file_ptr); + HDassert(0 <= start_idx); + HDassert(start_idx < NUM_DATA_ENTRIES); + HDassert(start_idx < virt_num_data_entries); - while ( ( i < virt_num_data_entries ) && - ( ! ( data[idx].local_pinned ) ) ) - { - i++; - idx++; - if ( idx >= virt_num_data_entries ) { - idx = 0; - } - } + idx = start_idx; - if ( data[idx].local_pinned ) { + while ((i < virt_num_data_entries) && (!(data[idx].local_pinned))) { + i++; + idx++; + if (idx >= virt_num_data_entries) { + idx = 0; + } + } - unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect); + if (data[idx].local_pinned) { - } else { + unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect); + } + else { - idx = -1; - } + idx = -1; + } } - return(idx); + return (idx); } /* local_unpin_next_pinned_entry() */ - /***************************************************************************** * Function: lock_and_unlock_random_entries() * * Purpose: Obtain a random number in the closed interval [min_count, - * max_count]. Then protect and unprotect that number of - * random entries. + * max_count]. Then protect and unprotect that number of + * random entries. 
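
The do/while in local_pin_random_entry() above is a plain rejection loop: draw indices uniformly from the range until an entry that is not already pinned turns up. A condensed restatement, assuming the test's data[] array and HDF5's HDrand() wrapper; the bounds here are placeholders.

    /* Sketch: rejection-sample an index until an unpinned entry is found.
     * (HDrand() % (max_idx - min_idx)) is 0 .. max_idx - min_idx - 1, so
     * idx always lands in [min_idx, max_idx - 1]; the loop terminates as
     * long as at least one entry in that range is unpinned.
     */
    int min_idx = 0, max_idx = 10; /* placeholder bounds, min_idx < max_idx */
    int idx;

    do {
        idx = (HDrand() % (max_idx - min_idx)) + min_idx;
    } while (data[idx].global_pinned || data[idx].local_pinned);
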
* * Do nothing if nerrors is non-zero on entry. * @@ -3237,34 +3166,25 @@ local_unpin_next_pinned_entry(H5F_t * file_ptr, * Programmer: John Mainzer * 1/12/06 * - * Modifications: - * *****************************************************************************/ - static void -lock_and_unlock_random_entries(H5F_t * file_ptr, - int min_idx, - int max_idx, - int min_count, - int max_count) +lock_and_unlock_random_entries(H5F_t *file_ptr, int min_idx, int max_idx, int min_count, int max_count) { - /* const char * fcn_name = "lock_and_unlock_random_entries()"; */ int count; int i; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( 0 <= min_count ); - HDassert( min_count < max_count ); + HDassert(file_ptr); + HDassert(0 <= min_count); + HDassert(min_count < max_count); count = (HDrand() % (max_count - min_count)) + min_count; - HDassert( min_count <= count ); - HDassert( count <= max_count ); + HDassert(min_count <= count); + HDassert(count <= max_count); - for ( i = 0; i < count; i++ ) - { + for (i = 0; i < count; i++) { lock_and_unlock_random_entry(file_ptr, min_idx, max_idx); } } @@ -3273,12 +3193,11 @@ lock_and_unlock_random_entries(H5F_t * file_ptr, } /* lock_and_unlock_random_entries() */ - /***************************************************************************** * Function: lock_and_unlock_random_entry() * * Purpose: Protect and then unprotect a random entry with index in - * the data[] array in the close interval [min_idx, max_idx]. + * the data[] array in the closed interval [min_idx, max_idx]. * * Do nothing if nerrors is non-zero on entry. * * Return: void * * Programmer: John Mainzer * 1/4/06 * - * Modifications: - * *****************************************************************************/ - static void -lock_and_unlock_random_entry(H5F_t * file_ptr, - int min_idx, - int max_idx) +lock_and_unlock_random_entry(H5F_t *file_ptr, int min_idx, int max_idx) { - /* const char * fcn_name = "lock_and_unlock_random_entry()"; */ int idx; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( 0 <= min_idx ); - HDassert( min_idx < max_idx ); - HDassert( max_idx < NUM_DATA_ENTRIES ); - HDassert( max_idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert(0 <= min_idx); + HDassert(min_idx < max_idx); + HDassert(max_idx < NUM_DATA_ENTRIES); + HDassert(max_idx < virt_num_data_entries); idx = (HDrand() % (max_idx - min_idx)) + min_idx; - HDassert( min_idx <= idx ); - HDassert( idx <= max_idx ); + HDassert(min_idx <= idx); + HDassert(idx <= max_idx); - lock_entry(file_ptr, idx); - unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET); + lock_entry(file_ptr, idx); + unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET); } return; } /* lock_and_unlock_random_entry() */ - /***************************************************************************** * Function: lock_entry() * @@ -3335,58 +3247,51 @@ lock_and_unlock_random_entry(H5F_t * file_ptr, * * Modifications: * - * JRM -- 7/11/06 - * Modified asserts to handle the new local_len field in - * datum. + * JRM -- 7/11/06 + * Modified asserts to handle the new local_len field in + * datum. 
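
Note the arithmetic behind count in lock_and_unlock_random_entries() above: despite the "closed interval [min_count, max_count]" wording, HDrand() % (max_count - min_count) never yields max_count - min_count, so the draw stays strictly below max_count. A worked check with small constants (the numbers here are for illustration only):

    /* Sketch: with min_count = 2 and max_count = 5,
     * HDrand() % (5 - 2) is 0, 1, or 2, so count is 2, 3, or 4;
     * max_count itself is never drawn, which is why the
     * HDassert(count <= max_count) above is only the loose bound.
     */
    int min_count = 2;
    int max_count = 5;
    int count     = (HDrand() % (max_count - min_count)) + min_count;

    HDassert(min_count <= count);
    HDassert(count < max_count); /* strictly below max_count */
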
* *****************************************************************************/ - static void -lock_entry(H5F_t * file_ptr, - int32_t idx) +lock_entry(H5F_t *file_ptr, int32_t idx) { - const char * fcn_name = "lock_entry()"; - struct datum * entry_ptr; - H5C_cache_entry_t * cache_entry_ptr; + struct datum *entry_ptr; + H5C_cache_entry_t *cache_entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( ! (entry_ptr->locked) ); + HDassert(!(entry_ptr->locked)); - cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr, - H5P_DATASET_XFER_DEFAULT, &(types[0]), entry_ptr->base_addr, - NULL, H5AC_WRITE); + cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr, &(types[0]), entry_ptr->base_addr, + &entry_ptr->base_addr, H5AC__NO_FLAGS_SET); - if ( ( cache_entry_ptr != (void *)(&(entry_ptr->header)) ) || - ( entry_ptr->header.type != &(types[0]) ) || - ( ( entry_ptr->len != entry_ptr->header.size ) && - ( entry_ptr->local_len != entry_ptr->header.size ) ) || - ( entry_ptr->base_addr != entry_ptr->header.addr ) ) { + if ((cache_entry_ptr != (void *)(&(entry_ptr->header))) || (entry_ptr->header.type != &(types[0])) || + ((entry_ptr->len != entry_ptr->header.size) && + (entry_ptr->local_len != entry_ptr->header.size)) || + (entry_ptr->base_addr != entry_ptr->header.addr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n", world_mpi_rank, __func__); } - } else { - - entry_ptr->locked = TRUE; + } + else { - } + entry_ptr->locked = TRUE; + } - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); } return; } /* lock_entry() */ - /***************************************************************************** * Function: mark_entry_dirty() * @@ -3400,49 +3305,43 @@ lock_entry(H5F_t * file_ptr, * 4/14/06 * *****************************************************************************/ - static void mark_entry_dirty(int32_t idx) { - const char * fcn_name = "mark_entry_dirty()"; - herr_t result; - struct datum * entry_ptr; + herr_t result; + struct datum *entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert ( entry_ptr->locked || entry_ptr->global_pinned ); - HDassert ( ! (entry_ptr->local_pinned) ); + HDassert(entry_ptr->locked || entry_ptr->global_pinned); + HDassert(!(entry_ptr->local_pinned)); (entry_ptr->ver)++; entry_ptr->dirty = TRUE; - result = H5AC_mark_entry_dirty( (void *)entry_ptr); + result = H5AC_mark_entry_dirty((void *)entry_ptr); - if ( result < 0 ) { + if (result < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: error in H5AC_mark_entry_dirty().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: error in H5AC_mark_entry_dirty().\n", world_mpi_rank, __func__); } } - else if ( ! 
( entry_ptr->locked ) ) - { - global_dirty_pins++; - } + else if (!(entry_ptr->locked)) { + global_dirty_pins++; + } } return; } /* mark_entry_dirty() */ - /***************************************************************************** * Function: pin_entry() * @@ -3455,150 +3354,126 @@ mark_entry_dirty(int32_t idx) * Programmer: John Mainzer * 4/11/06 * - * Modifications: - * *****************************************************************************/ - static void -pin_entry(H5F_t * file_ptr, - int32_t idx, - hbool_t global, - hbool_t dirty) +pin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty) { - /* const char * fcn_name = "pin_entry()"; */ - unsigned int flags = H5AC__PIN_ENTRY_FLAG; - struct datum * entry_ptr; + unsigned int flags = H5AC__PIN_ENTRY_FLAG; + struct datum *entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert ( ! (entry_ptr->global_pinned) ); - HDassert ( ! (entry_ptr->local_pinned) ); - HDassert ( ! ( dirty && ( ! global ) ) ); - - lock_entry(file_ptr, idx); - - if ( dirty ) { + HDassert(!(entry_ptr->global_pinned)); + HDassert(!(entry_ptr->local_pinned)); + HDassert(!(dirty && (!global))); - flags |= H5AC__DIRTIED_FLAG; - } + lock_entry(file_ptr, idx); - unlock_entry(file_ptr, idx, flags); + if (dirty) { - HDassert( (entry_ptr->header).is_pinned ); - HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) ); + flags |= H5AC__DIRTIED_FLAG; + } - if ( global ) { + unlock_entry(file_ptr, idx, flags); - entry_ptr->global_pinned = TRUE; + HDassert((entry_ptr->header).is_pinned); + HDassert((!dirty) || ((entry_ptr->header).is_dirty)); - global_pins++; + if (global) { - } else { + entry_ptr->global_pinned = TRUE; - entry_ptr->local_pinned = TRUE; + global_pins++; + } + else { - local_pins++; + entry_ptr->local_pinned = TRUE; - } + local_pins++; + } } return; } /* pin_entry() */ - -#ifdef H5_METADATA_TRACE_FILE /***************************************************************************** * Function: pin_protected_entry() * * Purpose: Insert the entry indicated by the type and index, mark it - * as dirty, and increment its version number. + * as dirty, and increment its version number. * - * Do nothing if nerrors is non-zero on entry. + * Do nothing if nerrors is non-zero on entry. * * Return: void * * Programmer: John Mainzer * 01/04/06 * - * Modifications: - * - * None. 
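
pin_entry() above pins through the protect/unprotect cycle rather than through a dedicated pin call: the entry is protected, then unprotected with H5AC__PIN_ENTRY_FLAG, plus H5AC__DIRTIED_FLAG when the pin should also dirty it. A condensed sketch of just that flag plumbing, assuming the test's lock_entry()/unlock_entry() helpers and a file_ptr/idx pair already validated by the caller:

    /* Sketch: pin (and optionally dirty) the entry at idx on unprotect. */
    hbool_t      dirty = TRUE; /* placeholder: pin and dirty together */
    unsigned int flags = H5AC__PIN_ENTRY_FLAG;

    if (dirty)
        flags |= H5AC__DIRTIED_FLAG; /* dirty pins must be global pins */

    lock_entry(file_ptr, idx);          /* H5AC_protect() underneath */
    unlock_entry(file_ptr, idx, flags); /* the pin takes effect here */
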
- * *****************************************************************************/ - static void -pin_protected_entry(int32_t idx, - hbool_t global) +pin_protected_entry(int32_t idx, hbool_t global) { - const char * fcn_name = "pin_protected_entry()"; - herr_t result; - struct datum * entry_ptr; + herr_t result; + struct datum *entry_ptr; - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( entry_ptr->locked ); + HDassert(entry_ptr->locked); - if ( nerrors == 0 ) { + if (nerrors == 0) { - result = H5AC_pin_protected_entry((void *)entry_ptr); + result = H5AC_pin_protected_entry((void *)entry_ptr); - if ( ( result < 0 ) || - ( entry_ptr->header.type != &(types[0]) ) || - ( ( entry_ptr->len != entry_ptr->header.size ) && - ( entry_ptr->local_len != entry_ptr->header.size ) )|| - ( entry_ptr->base_addr != entry_ptr->header.addr ) || - ( ! ( (entry_ptr->header).is_pinned ) ) ) { + if ((result < 0) || (entry_ptr->header.type != &(types[0])) || + ((entry_ptr->len != entry_ptr->header.size) && + (entry_ptr->local_len != entry_ptr->header.size)) || + (entry_ptr->base_addr != entry_ptr->header.addr) || (!((entry_ptr->header).is_pinned))) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: Error in H5AC_pin_protected entry().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Error in H5AC_pin_protected entry().\n", world_mpi_rank, __func__); } } - if ( global ) { - - entry_ptr->global_pinned = TRUE; + if (global) { - global_pins++; - - } else { + entry_ptr->global_pinned = TRUE; - entry_ptr->local_pinned = TRUE; + global_pins++; + } + else { - local_pins++; + entry_ptr->local_pinned = TRUE; - } + local_pins++; + } - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); } return; } /* pin_protected_entry() */ -#endif /* H5_METADATA_TRACE_FILE */ - /***************************************************************************** * Function: move_entry() * * Purpose: Move the entry indicated old_idx to the entry indicated - * by new_idex. Touch up the data array so that flush will - * not choke. + * by new_idx. Touch up the data array so that flush will + * not choke. * - * Do nothing if nerrors isn't zero, or if old_idx equals - * new_idx. + * Do nothing if nerrors isn't zero, or if old_idx equals + * new_idx. 
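
Unlike pin_entry(), pin_protected_entry() above requires the target to be protected already: H5AC_pin_protected_entry() is applied between the protect and the eventual unprotect, and the pin survives the unprotect. A sketch of that ordering, again assuming the test's helpers and globals:

    /* Sketch: pin an entry while it is protected; the pin outlives the unprotect. */
    lock_entry(file_ptr, idx);                       /* protect first */
    if (H5AC_pin_protected_entry((void *)&(data[idx])) < 0)
        nerrors++;
    unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET); /* entry remains pinned */
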
* * Return: void * @@ -3607,34 +3482,31 @@ pin_protected_entry(int32_t idx, * *****************************************************************************/ static void -move_entry(H5F_t * file_ptr, - int32_t old_idx, - int32_t new_idx) +move_entry(H5F_t *file_ptr, int32_t old_idx, int32_t new_idx) { - const char * fcn_name = "move_entry()"; - herr_t result; - int tmp; - size_t tmp_len; - haddr_t old_addr = HADDR_UNDEF; - haddr_t new_addr = HADDR_UNDEF; - struct datum * old_entry_ptr; - struct datum * new_entry_ptr; - - if ( ( nerrors == 0 ) && ( old_idx != new_idx ) ) { - - HDassert( file_ptr ); - HDassert( ( 0 <= old_idx ) && ( old_idx < NUM_DATA_ENTRIES ) ); - HDassert( old_idx < virt_num_data_entries ); - HDassert( ( 0 <= new_idx ) && ( new_idx < NUM_DATA_ENTRIES ) ); - HDassert( new_idx < virt_num_data_entries ); + herr_t result; + int tmp; + size_t tmp_len; + haddr_t old_addr = HADDR_UNDEF; + haddr_t new_addr = HADDR_UNDEF; + struct datum *old_entry_ptr; + struct datum *new_entry_ptr; + + if ((nerrors == 0) && (old_idx != new_idx)) { + + HDassert(file_ptr); + HDassert((0 <= old_idx) && (old_idx < NUM_DATA_ENTRIES)); + HDassert(old_idx < virt_num_data_entries); + HDassert((0 <= new_idx) && (new_idx < NUM_DATA_ENTRIES)); + HDassert(new_idx < virt_num_data_entries); old_entry_ptr = &(data[old_idx]); new_entry_ptr = &(data[new_idx]); - HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); - HDassert( !(old_entry_ptr->header.is_protected) ); - HDassert( !(old_entry_ptr->locked) ); - HDassert( old_entry_ptr->len == new_entry_ptr->len ); + HDassert(((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); + HDassert(!(old_entry_ptr->header.is_protected)); + HDassert(!(old_entry_ptr->locked)); + HDassert(old_entry_ptr->len == new_entry_ptr->len); old_addr = old_entry_ptr->base_addr; new_addr = new_entry_ptr->base_addr; @@ -3642,12 +3514,12 @@ move_entry(H5F_t * file_ptr, /* Moving will mark the entry dirty if it is not already */ old_entry_ptr->dirty = TRUE; - /* touch up versions, base_addrs, and data_index. Do this - * now as it is possible that the rename will trigger a + /* touch up versions, base_addrs, and data_index. Do this + * now as it is possible that the rename will trigger a * sync point. 
*/ - if(old_entry_ptr->ver < new_entry_ptr->ver) - old_entry_ptr->ver = new_entry_ptr->ver; + if (old_entry_ptr->ver < new_entry_ptr->ver) + old_entry_ptr->ver = new_entry_ptr->ver; else (old_entry_ptr->ver)++; @@ -3661,82 +3533,77 @@ move_entry(H5F_t * file_ptr, old_entry_ptr->index = new_entry_ptr->index; new_entry_ptr->index = tmp; - if(old_entry_ptr->local_len != new_entry_ptr->local_len) { - tmp_len = old_entry_ptr->local_len; - old_entry_ptr->local_len = new_entry_ptr->local_len; - new_entry_ptr->local_len = tmp_len; - } /* end if */ + if (old_entry_ptr->local_len != new_entry_ptr->local_len) { + tmp_len = old_entry_ptr->local_len; + old_entry_ptr->local_len = new_entry_ptr->local_len; + new_entry_ptr->local_len = tmp_len; + } /* end if */ result = H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr); - if ( ( result < 0 ) || ( old_entry_ptr->header.addr != new_addr ) ) { + if ((result < 0) || (old_entry_ptr->header.addr != new_addr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n", world_mpi_rank, __func__); } + } + else { - } else { - - HDassert( ((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); + HDassert(((old_entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); - if ( ! (old_entry_ptr->header.is_dirty) ) { + if (!(old_entry_ptr->header.is_dirty)) { - /* it is possible that we just exceeded the dirty bytes - * threshold, triggering a write of the newly inserted - * entry. Test for this, and only flag an error if this - * is not the case. - */ + /* it is possible that we just exceeded the dirty bytes + * threshold, triggering a write of the newly inserted + * entry. Test for this, and only flag an error if this + * is not the case. + */ - struct H5AC_aux_t * aux_ptr; + struct H5AC_aux_t *aux_ptr; - aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr)); + aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr)); - if ( ! ( ( aux_ptr != NULL ) && - ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) && - ( aux_ptr->dirty_bytes == 0 ) ) ) { + if (!((aux_ptr != NULL) && (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC) && + (aux_ptr->dirty_bytes == 0))) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: data[%d].header.is_dirty = %d.\n", - world_mpi_rank, fcn_name, new_idx, - (int)(data[new_idx].header.is_dirty)); - } + if (verbose) { + HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n", world_mpi_rank, __func__, + new_idx, (int)(data[new_idx].header.is_dirty)); + } } - } else { + } + else { - HDassert( old_entry_ptr->header.is_dirty ); + HDassert(old_entry_ptr->header.is_dirty); } } } } /* move_entry() */ - /***************************************************************************** * - * Function: reset_server_counts() + * Function: reset_server_counts() * - * Purpose: Send a message to the server process requesting it to reset - * its counters. Await confirmation message. + * Purpose: Send a message to the server process requesting it to reset + * its counters. Await confirmation message. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/6/10 + * Programmer: JRM -- 5/6/10 * *****************************************************************************/ static hbool_t reset_server_counts(void) { - const char * fcn_name = "reset_server_counts()"; - hbool_t success = TRUE; /* will set to FALSE if appropriate. 
*/ + hbool_t success = TRUE; /* will set to FALSE if appropriate. */ struct mssg_t mssg; - if ( success ) { + if (success) { /* compose the message */ mssg.req = REQ_RW_COUNT_RESET_CODE; @@ -3749,113 +3616,96 @@ reset_server_counts(void) mssg.count = 0; mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ! recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE) ) { + if (!recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); - } - } else if ( ( mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != 0 ) || - ( mssg.len != 0 ) || - ( mssg.ver != 0 ) || - ( mssg.count != 0 ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); + } + } + else if ((mssg.req != REQ_RW_COUNT_RESET_RPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) || + (mssg.ver != 0) || (mssg.count != 0) || (mssg.magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: Bad data in req r/w counter reset reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in req r/w counter reset reply.\n", world_mpi_rank, + __func__); } } } - return(success); + return (success); } /* reset_server_counts() */ - /***************************************************************************** * Function: resize_entry() * * Purpose: Resize the pinned entry indicated by idx to the new_size. - * Note that new_size must be greater than 0, and must be - * less than or equal to the original size of the entry. + * Note that new_size must be greater than 0, and must be + * less than or equal to the original size of the entry. * - * Do nothing if nerrors isn't zero. + * Do nothing if nerrors isn't zero. * * Return: void * * Programmer: John Mainzer * 7/11/06 * - * Modifications: - * - * None - * *****************************************************************************/ - static void -resize_entry(int32_t idx, - size_t new_size) +resize_entry(int32_t idx, size_t new_size) { - const char * fcn_name = "resize_entry()"; - herr_t result; - struct datum * entry_ptr; + herr_t result; + struct datum *entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); - HDassert( !(entry_ptr->locked) ); - HDassert( ( entry_ptr->global_pinned ) && - ( ! 
entry_ptr->local_pinned ) ); - HDassert( ( entry_ptr->header.size == entry_ptr->len ) || - ( entry_ptr->header.size == entry_ptr->local_len ) ); - HDassert( new_size > 0 ); - HDassert( new_size <= entry_ptr->len ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); + HDassert(!(entry_ptr->locked)); + HDassert((entry_ptr->global_pinned) && (!entry_ptr->local_pinned)); + HDassert((entry_ptr->header.size == entry_ptr->len) || + (entry_ptr->header.size == entry_ptr->local_len)); + HDassert(new_size > 0); + HDassert(new_size <= entry_ptr->len); - result = H5AC_resize_entry((void *)entry_ptr, new_size); + result = H5AC_resize_entry((void *)entry_ptr, new_size); - if ( result < 0 ) { + if (result < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n", world_mpi_rank, __func__); } + } + else { - } else { - - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); - HDassert( entry_ptr->header.is_dirty ); - HDassert( entry_ptr->header.size == new_size ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); + HDassert(entry_ptr->header.is_dirty); + HDassert(entry_ptr->header.size == new_size); - entry_ptr->dirty = TRUE; - entry_ptr->local_len = new_size; + entry_ptr->dirty = TRUE; + entry_ptr->local_len = new_size; /* touch up version. */ @@ -3867,121 +3717,115 @@ resize_entry(int32_t idx, } /* resize_entry() */ - /***************************************************************************** * - * Function: setup_cache_for_test() + * Function: setup_cache_for_test() * - * Purpose: Setup the parallel cache for a test, and return the file id - * and a pointer to the cache's internal data structures. + * Purpose: Setup the parallel cache for a test, and return the file id + * and a pointer to the cache's internal data structures. * - * To do this, we must create a file, flush it (so that we - * don't have to worry about entries in the metadata cache), - * look up the address of the metadata cache, and then instruct - * the cache to omit sanity checks on dxpl IDs. + * To do this, we must create a file, flush it (so that we + * don't have to worry about entries in the metadata cache), + * look up the address of the metadata cache, and then instruct + * the cache to omit sanity checks on dxpl IDs. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/4/06 + * Programmer: JRM -- 1/4/06 * *****************************************************************************/ static hbool_t -setup_cache_for_test(hid_t * fid_ptr, - H5F_t ** file_ptr_ptr, - H5C_t ** cache_ptr_ptr, - int metadata_write_strategy) +setup_cache_for_test(hid_t *fid_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, int metadata_write_strategy) { - const char * fcn_name = "setup_cache_for_test()"; - hbool_t success = FALSE; /* will set to TRUE if appropriate. */ - hbool_t enable_rpt_fcn = FALSE; - hid_t fid = -1; + hbool_t success = FALSE; /* will set to TRUE if appropriate. 
*/ + hbool_t enable_rpt_fcn = FALSE; + hid_t fid = -1; H5AC_cache_config_t config; H5AC_cache_config_t test_config; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + haddr_t actual_base_addr; - HDassert ( fid_ptr != NULL ); - HDassert ( file_ptr_ptr != NULL ); - HDassert ( cache_ptr_ptr != NULL ); + HDassert(fid_ptr != NULL); + HDassert(file_ptr_ptr != NULL); + HDassert(cache_ptr_ptr != NULL); fid = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - if ( fid < 0 ) { + /* Push API context */ + H5CX_push(); + + if (fid < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n", world_mpi_rank, __func__); } - } else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + } + else if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } - } else { - file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE); + } + else { + file_ptr = (H5F_t *)H5VL_object_verify(fid, H5I_FILE); } - if ( file_ptr == NULL ) { + if (file_ptr == NULL) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n", world_mpi_rank, __func__); } - } else { + } + else { cache_ptr = file_ptr->shared->cache; } - if ( cache_ptr == NULL ) { + if (cache_ptr == NULL) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n", world_mpi_rank, __func__); } - } else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) { + } + else if (cache_ptr->magic != H5C__H5C_T_MAGIC) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n", world_mpi_rank, __func__); } - } else { + } + else { cache_ptr->ignore_tags = TRUE; - *fid_ptr = fid; - *file_ptr_ptr = file_ptr; - *cache_ptr_ptr = cache_ptr; + *fid_ptr = fid; + *file_ptr_ptr = file_ptr; + *cache_ptr_ptr = cache_ptr; H5C_stats__reset(cache_ptr); success = TRUE; } - if ( success ) { + if (success) { config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { - - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", - world_mpi_rank, fcn_name); + if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { - } else { + HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n", world_mpi_rank, + __func__); + } + else { config.rpt_fcn_enabled = enable_rpt_fcn; config.metadata_write_strategy = metadata_write_strategy; - if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { - - HDfprintf(stdout, - "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); + if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { - } else if ( enable_rpt_fcn ) { + HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, + __func__); + } + else if (enable_rpt_fcn) { - HDfprintf(stdout, "%d:%s: rpt_fcn enabled.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: rpt_fcn 
enabled.\n", world_mpi_rank, __func__); } } } @@ -3991,222 +3835,239 @@ setup_cache_for_test(hid_t * fid_ptr, * we can't do our usual checks in the serial case. */ - if ( success ) /* verify that the metadata write strategy is as expected */ + if (success) /* verify that the metadata write strategy is as expected */ { - if ( cache_ptr->aux_ptr == NULL ) { + if (cache_ptr->aux_ptr == NULL) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n", world_mpi_rank, __func__); } - } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != - H5AC__H5AC_AUX_T_MAGIC ) { + } + else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic != H5AC__H5AC_AUX_T_MAGIC) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n", + world_mpi_rank, __func__); } - } else if( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy - != metadata_write_strategy ) { + } + else if (((H5AC_aux_t *)(cache_ptr->aux_ptr))->metadata_write_strategy != metadata_write_strategy) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n", world_mpi_rank, + __func__); } } } - /* also verify that the expected metadata write strategy is reported + /* also verify that the expected metadata write strategy is reported * when we get the current configuration. */ - if ( success ) { + if (success) { test_config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &test_config) - != SUCCEED ) { + if (H5AC_get_cache_auto_resize_config(cache_ptr, &test_config) != SUCCEED) { + + HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", world_mpi_rank, + __func__); + } + else if (test_config.metadata_write_strategy != metadata_write_strategy) { + + nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { - } else if ( test_config.metadata_write_strategy != - metadata_write_strategy ) { + HDfprintf(stdout, "%d:%s: unexpected metadata_write_strategy.\n", world_mpi_rank, __func__); + } + } + } + /* allocate space for test entries -- do this before we set the + * sync point done callback as it will dirty the superblock, requiring + * another flush. If the sync point done callback is set, this will + * cause a spurious failure. + */ + if (success) { /* allocate space for test entries */ + + actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, (hsize_t)(max_addr + BASE_ADDR)); + + if (actual_base_addr == HADDR_UNDEF) { + + success = FALSE; nerrors++; - if ( verbose ) { + if (verbose) { + HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n", world_mpi_rank, __func__); + } + } + else if (actual_base_addr > BASE_ADDR) { - HDfprintf(stdout, - "%d:%s: unexpected metadata_write_strategy.\n", - world_mpi_rank, fcn_name); + /* If this happens, must increase BASE_ADDR so that the + * actual_base_addr is <= BASE_ADDR. This should only happen + * if the size of the superblock is increase. 
+ */ + success = FALSE; + nerrors++; + + if (verbose) { + HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n", world_mpi_rank, __func__); } } } + /* flush the file again -- space allocation dirtied superblock */ + if (success) { + + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n", world_mpi_rank, __func__); + } + } + } #if DO_SYNC_AFTER_WRITE - if ( success ) { + if (success) { - if ( H5AC_set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) { + if (H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: H5C_set_write_done_callback failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5C_set_write_done_callback failed.\n", world_mpi_rank, __func__); } - } + } } #endif /* DO_SYNC_AFTER_WRITE */ - if ( success ) { + if (success) { - if ( H5AC_set_sync_point_done_callback(cache_ptr, verify_writes) != - SUCCEED ) { + if (H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: H5AC_set_sync_point_done_callback failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank, + __func__); } - } + } } - return(success); + return (success); } /* setup_cache_for_test() */ - /***************************************************************************** * - * Function: verify_writes() + * Function: verify_writes() * - * Purpose: Verify that the indicated entries have been written exactly - * once each, and that the indicated total number of writes - * has been processed by the server process. Flag an error if - * discrepency is noted. Finally reset the counters maintained - * by the server process. + * Purpose: Verify that the indicated entries have been written exactly + * once each, and that the indicated total number of writes + * has been processed by the server process. Flag an error if + * discrepancy is noted. Finally reset the counters maintained + * by the server process. * - * This function should only be called by the metadata cache - * as the "sync point done" function, as it must do some - * synchronization to avoid false positives. + * This function should only be called by the metadata cache + * as the "sync point done" function, as it must do some + * synchronization to avoid false positives. * - * Note that at present, this function does not allow for the - * case in which one or more of the indicated entries should - * have been written more than once since the last time the - * server process's counters were reset. That is fine for now, - * as with the current metadata write strategies, no entry - * should be written more than once per sync point. If this - * changes this limitation will have to be revisited. + * Note that at present, this function does not allow for the + * case in which one or more of the indicated entries should + * have been written more than once since the last time the + * server process's counters were reset. That is fine for now, + * as with the current metadata write strategies, no entry + * should be written more than once per sync point. If this + * changes this limitation will have to be revisited. * - * Return: void. + * Return: void. 
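
The callback registration above is the hook that drives the write verification below: verify_writes() runs at the end of every sync point, and take_down_cache() later clears the hook so that unrelated I/O during file close cannot corrupt the write counts. A sketch of the install/clear pair, assuming a valid cache_ptr and the return-value convention used above:

    /* Sketch: install the sync point done callback for the test, ... */
    if (H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED)
        nerrors++;

    /* ... exercise the cache ... */

    /* ... and clear it again before the file is closed. */
    if (H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED)
        nerrors++;
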
* - * Programmer: JRM -- 5/9/10 + * Programmer: JRM -- 5/9/10 * *****************************************************************************/ static void -verify_writes(int num_writes, - haddr_t * written_entries_tbl) +verify_writes(unsigned num_writes, haddr_t *written_entries_tbl) { - const char * fcn_name = "verify_writes()"; - const hbool_t report = FALSE; - hbool_t proceed = TRUE; - int i = 0; + const hbool_t report = FALSE; + hbool_t proceed = TRUE; + unsigned u = 0; - HDassert( world_mpi_rank != world_server_mpi_rank ); - HDassert( num_writes >= 0 ); - HDassert( ( num_writes == 0 ) || - ( written_entries_tbl != NULL ) ); + HDassert(world_mpi_rank != world_server_mpi_rank); + HDassert((num_writes == 0) || (written_entries_tbl != NULL)); /* barrier to ensure that all other processes are ready to leave * the sync point as well. */ - if ( proceed ) { + if (proceed) { - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { proceed = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__); } } } - if ( proceed ) { - + if (proceed) proceed = verify_total_writes(num_writes); - } - while ( ( proceed ) && ( i < num_writes ) ) - { - proceed = verify_entry_writes(written_entries_tbl[i], 1); - i++; + while (proceed && u < num_writes) { + proceed = verify_entry_writes(written_entries_tbl[u], 1); + u++; } /* barrier to ensure that all other processes have finished verifying * the number of writes before we reset the counters. */ - if ( proceed ) { + if (proceed) { - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { proceed = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, __func__); } } } - if ( proceed ) { + if (proceed) { proceed = reset_server_counts(); } /* if requested, display status of check to stdout */ - if ( ( report ) && ( file_mpi_rank == 0 ) ) { - - if ( proceed ) { - - HDfprintf(stdout, "%d:%s: verified %d writes.\n", - world_mpi_rank, fcn_name, num_writes); + if ((report) && (file_mpi_rank == 0)) { - } else { + if (proceed) { - HDfprintf(stdout, "%d:%s: FAILED to verify %d writes.\n", - world_mpi_rank, fcn_name, num_writes); + HDfprintf(stdout, "%d:%s: verified %u writes.\n", world_mpi_rank, __func__, num_writes); + } + else { + HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n", world_mpi_rank, __func__, num_writes); } } /* final barrier to ensure that all processes think that the server - * counters have been reset before we leave the sync point. This - * barrier is probaby not necessary at this point in time (5/9/10), + * counters have been reset before we leave the sync point. This + * barrier is probably not necessary at this point in time (5/9/10), * but I can think of at least one likely change to the metadata write * strategies that will require it -- hence its insertion now. 
*/ - if ( proceed ) { + if (proceed) { - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { proceed = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__); } } } @@ -4215,63 +4076,57 @@ verify_writes(int num_writes, } /* verify_writes() */ - /***************************************************************************** * - * Function: setup_rand() * - * Purpose: Use gettimeofday() to obtain a seed for rand(), print the - * seed to stdout, and then pass it to srand(). + * Function: setup_rand() * - * Increment nerrors if any errors are detected. + * Purpose: Use gettimeofday() to obtain a seed for rand(), print the + * seed to stdout, and then pass it to srand(). * - * Return: void. + * Increment nerrors if any errors are detected. * - * Programmer: JRM -- 1/12/06 + * Return: void. * + * Programmer: JRM -- 1/12/06 * * Modifications: * - * JRM -- 5/9/06 - * Modified function to facilitate setting predefined seeds. + * JRM -- 5/9/06 + * Modified function to facilitate setting predefined seeds. * *****************************************************************************/ - static void setup_rand(void) { - const char * fcn_name = "setup_rand()"; - hbool_t use_predefined_seeds = FALSE; - int num_predefined_seeds = 3; - unsigned predefined_seeds[3] = {33402, 33505, 33422}; - unsigned seed; + hbool_t use_predefined_seeds = FALSE; + int num_predefined_seeds = 3; + unsigned predefined_seeds[3] = {18669, 89925, 12577}; + unsigned seed; struct timeval tv; - if ( ( use_predefined_seeds ) && - ( world_mpi_size == num_predefined_seeds ) ) { - - HDassert( world_mpi_rank >= 0 ); - HDassert( world_mpi_rank < world_mpi_size ); + if ((use_predefined_seeds) && (world_mpi_size == num_predefined_seeds)) { - seed = predefined_seeds[world_mpi_rank]; - HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", - world_mpi_rank, fcn_name, seed); - fflush(stdout); - HDsrand(seed); + HDassert(world_mpi_rank >= 0); + HDassert(world_mpi_rank < world_mpi_size); - } else { + seed = predefined_seeds[world_mpi_rank]; + HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n", world_mpi_rank, __func__, seed); + fflush(stdout); + HDsrand(seed); + } + else { - if ( HDgettimeofday(&tv, NULL) != 0 ) { + if (HDgettimeofday(&tv, NULL) != 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n", world_mpi_rank, __func__); } - } else { + } + else { seed = (unsigned)tv.tv_usec; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: seed = %d.\n", - world_mpi_rank, fcn_name, seed); + if (verbose) { + HDfprintf(stdout, "%d:%s: seed = %d.\n", world_mpi_rank, __func__, seed); fflush(stdout); } HDsrand(seed); @@ -4282,78 +4137,108 @@ setup_rand(void) } /* setup_rand() */ - /***************************************************************************** * - * Function: take_down_cache() + * Function: take_down_cache() * - * Purpose: Take down the parallel cache after a test. + * Purpose: Take down the parallel cache after a test. * - * To do this, we must close the file, and delete if if - * possible. + * To do this, we must close the file, and delete it if + * possible. 
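
setup_rand() above seeds each rank from the microseconds field of gettimeofday() and prints the seed, so a failing run can be replayed by pasting the printed values into predefined_seeds[] and flipping use_predefined_seeds. A sketch of the fallback branch alone, assuming HDF5's HD-prefixed libc wrappers:

    /* Sketch: derive and announce a per-rank seed from wall-clock microseconds. */
    struct timeval tv;

    if (HDgettimeofday(&tv, NULL) != 0)
        nerrors++;
    else {
        unsigned seed = (unsigned)tv.tv_usec;

        HDfprintf(stdout, "%d: seed = %u.\n", world_mpi_rank, seed);
        fflush(stdout);
        HDsrand(seed);
    }
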
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/4/06 - * - * Modifications: - * - * None. + * Programmer: JRM -- 1/4/06 * *****************************************************************************/ - static hbool_t -take_down_cache(hid_t fid) +take_down_cache(hid_t fid, H5C_t *cache_ptr) { - const char * fcn_name = "take_down_cache()"; - hbool_t success = FALSE; /* will set to TRUE if appropriate. */ + hbool_t success = TRUE; /* will set to FALSE if appropriate. */ - /* close the file and delete it */ - if ( H5Fclose(fid) < 0 ) { + /* flush the file -- this should write out any remaining test + * entries in the cache. + */ + if ((success) && (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)) { + success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } + } - } else if ( world_mpi_rank == world_server_mpi_rank ) { + /* Now reset the sync point done callback. Must do this as with + * the SWMR mods, the cache will do additional I/O on file close + * un-related to the test entries, and thereby corrupt our counts + * of entry writes. + */ + if (success) { - if ( HDremove(filenames[0]) < 0 ) { + if (H5AC__set_sync_point_done_callback(cache_ptr, NULL) != SUCCEED) { + success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: HDremove() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5AC__set_sync_point_done_callback failed.\n", world_mpi_rank, + __func__); } - } else { + } + } - success = TRUE; + /* close the file */ + if ((success) && (H5Fclose(fid) < 0)) { + + success = FALSE; + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fclose() failed.\n", world_mpi_rank, __func__); } - } else { + } - success = TRUE; + /* Pop API context */ + H5CX_pop(FALSE); + + if (success) { + + if (world_mpi_rank == world_server_mpi_rank) { + + if (HDremove(filenames[0]) < 0) { + + success = FALSE; + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: HDremove() failed.\n", world_mpi_rank, __func__); + } + } + } + else { + + /* verify that there have been no further writes of test + * entries during the close + */ + success = verify_total_writes(0); + } } - return(success); + return (success); } /* take_down_cache() */ - /***************************************************************************** * Function: verify_entry_reads * - * Purpose: Query the server to determine the number of times the - * indicated entry has been read since the last time the - * server counters were reset. + * Purpose: Query the server to determine the number of times the + * indicated entry has been read since the last time the + * server counters were reset. * - * Return TRUE if successful, and if the supplied expected - * number of reads matches the number of reads reported by - * the server process. + * Return TRUE if successful, and if the supplied expected + * number of reads matches the number of reads reported by + * the server process. * - * Return FALSE and flag an error otherwise. + * Return FALSE and flag an error otherwise. * * Return: TRUE if successful, FALSE otherwise. 
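
Taken together, setup_cache_for_test() and take_down_cache() bracket every test in this file. A condensed usage sketch; metadata_write_strategy would be one of the H5AC_METADATA_WRITE_STRATEGY__* values, and the middle section stands in for the per-test entry traffic:

    /* Sketch: the setup/teardown bracket around a parallel cache test. */
    hid_t  fid       = -1;
    H5F_t *file_ptr  = NULL;
    H5C_t *cache_ptr = NULL;

    if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
                              H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
        nerrors++;

    /* ... insert, lock/unlock, pin, move, and resize test entries ... */

    if (!take_down_cache(fid, cache_ptr))
        nerrors++;
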
* @@ -4363,15 +4248,13 @@ take_down_cache(hid_t fid) *------------------------------------------------------------------------- */ static hbool_t -verify_entry_reads(haddr_t addr, - int expected_entry_reads) +verify_entry_reads(haddr_t addr, int expected_entry_reads) { - const char * fcn_name = "verify_entry_reads()"; - hbool_t success = TRUE; - int reported_entry_reads; + hbool_t success = TRUE; + int reported_entry_reads = 0; struct mssg_t mssg; - if ( success ) { + if (success) { /* compose the message */ mssg.req = REQ_ENTRY_READS_CODE; @@ -4384,84 +4267,75 @@ verify_entry_reads(haddr_t addr, mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ! recv_mssg(&mssg, REQ_ENTRY_READS_RPLY_CODE) ) { + if (!recv_mssg(&mssg, REQ_ENTRY_READS_RPLY_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ( mssg.req != REQ_ENTRY_READS_RPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != addr ) || - ( mssg.len != 0 ) || - ( mssg.ver != 0 ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if ((mssg.req != REQ_ENTRY_READS_RPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != addr) || (mssg.len != 0) || (mssg.ver != 0) || + (mssg.magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in req entry reads reply.\n", world_mpi_rank, __func__); } - } else { + } + else { - reported_entry_reads = mssg.count; + H5_CHECKED_ASSIGN(reported_entry_reads, int, mssg.count, unsigned); } } - if ( ! success ) { + if (success) { - if ( reported_entry_reads != expected_entry_reads ) { + if (reported_entry_reads != expected_entry_reads) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, (long long)addr, - reported_entry_reads, expected_entry_reads); + if (verbose) { + HDfprintf(stdout, "%d:%s: rep/exp entry 0x%" PRIxHADDR " reads mismatch (%d/%d).\n", + world_mpi_rank, __func__, addr, reported_entry_reads, expected_entry_reads); } - } + } } - return(success); + return (success); } /* verify_entry_reads() */ - /***************************************************************************** * Function: verify_entry_writes * - * Purpose: Query the server to determine the number of times the - * indicated entry has been written since the last time the - * server counters were reset. + * Purpose: Query the server to determine the number of times the + * indicated entry has been written since the last time the + * server counters were reset. * - * Return TRUE if successful, and if the supplied expected - * number of reads matches the number of reads reported by - * the server process. 
+ * Return TRUE if successful, and if the supplied expected + * number of writes matches the number of writes reported by + * the server process. * - * Return FALSE and flag an error otherwise. + * Return FALSE and flag an error otherwise. * * Return: TRUE if successful, FALSE otherwise. * @@ -4471,15 +4345,13 @@ verify_entry_reads(haddr_t addr, *------------------------------------------------------------------------- */ static hbool_t -verify_entry_writes(haddr_t addr, - int expected_entry_writes) +verify_entry_writes(haddr_t addr, int expected_entry_writes) { - const char * fcn_name = "verify_entry_writes()"; - hbool_t success = TRUE; - int reported_entry_writes; + hbool_t success = TRUE; + int reported_entry_writes = 0; struct mssg_t mssg; - if ( success ) { + if (success) { /* compose the message */ mssg.req = REQ_ENTRY_WRITES_CODE; @@ -4492,100 +4364,90 @@ verify_entry_writes(haddr_t addr, mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ! recv_mssg(&mssg, REQ_ENTRY_WRITES_RPLY_CODE) ) { + if (!recv_mssg(&mssg, REQ_ENTRY_WRITES_RPLY_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ( mssg.req != REQ_ENTRY_WRITES_RPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != addr ) || - ( mssg.len != 0 ) || - ( mssg.ver != 0 ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if ((mssg.req != REQ_ENTRY_WRITES_RPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != addr) || (mssg.len != 0) || (mssg.ver != 0) || + (mssg.magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in req entry writes reply.\n", world_mpi_rank, __func__); } - } else { + } + else { - reported_entry_writes = mssg.count; + H5_CHECKED_ASSIGN(reported_entry_writes, int, mssg.count, unsigned); } } - if ( ! success ) { + if (success) { - if ( reported_entry_writes != expected_entry_writes ) { + if (reported_entry_writes != expected_entry_writes) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: rep/exp entry 0x%llx writes mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, (long long)addr, - reported_entry_writes, expected_entry_writes); + if (verbose) { + HDfprintf(stdout, "%d:%s: rep/exp entry 0x%llx writes mismatch (%d/%d).\n", world_mpi_rank, + __func__, (long long)addr, reported_entry_writes, expected_entry_writes); } - } + } } - return(success); + return (success); } /* verify_entry_writes() */ - /***************************************************************************** * - * Function: verify_total_reads() + * Function: verify_total_reads() * - * Purpose: Query the server to obtain the total reads since the last - * server counter reset, and compare this value with the supplied - * expected value.
+ * Purpose: Query the server to obtain the total reads since the last + * server counter reset, and compare this value with the supplied + * expected value. * - * If the values match, return TRUE. + * If the values match, return TRUE. * - * If the values don't match, flag an error and return FALSE. + * If the values don't match, flag an error and return FALSE. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/6/10 + * Programmer: JRM -- 5/6/10 * *****************************************************************************/ static hbool_t verify_total_reads(int expected_total_reads) { - const char * fcn_name = "verify_total_reads()"; - hbool_t success = TRUE; /* will set to FALSE if appropriate. */ - long reported_total_reads; + hbool_t success = TRUE; /* will set to FALSE if appropriate. */ + long reported_total_reads; struct mssg_t mssg; - if ( success ) { + if (success) { /* compose the message */ mssg.req = REQ_TTL_READS_CODE; @@ -4598,96 +4460,86 @@ verify_total_reads(int expected_total_reads) mssg.count = 0; mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ! recv_mssg(&mssg, REQ_TTL_READS_RPLY_CODE) ) { + if (!recv_mssg(&mssg, REQ_TTL_READS_RPLY_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); - } - } else if ( ( mssg.req != REQ_TTL_READS_RPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != 0 ) || - ( mssg.len != 0 ) || - ( mssg.ver != 0 ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); + } + } + else if ((mssg.req != REQ_TTL_READS_RPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) || + (mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", world_mpi_rank, __func__); } - } else { + } + else { reported_total_reads = mssg.count; } } - if ( success ) { + if (success) { - if ( reported_total_reads != expected_total_reads ) { + if (reported_total_reads != expected_total_reads) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: reported/expected total reads mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, - reported_total_reads, expected_total_reads); - + if (verbose) { + HDfprintf(stdout, "%d:%s: reported/expected total reads mismatch (%ld/%d).\n", world_mpi_rank, + __func__, reported_total_reads, expected_total_reads); } } } - return(success); + return (success); } /* verify_total_reads() */ - /***************************************************************************** * - * Function: verify_total_writes() + * Function: verify_total_writes() * - * Purpose: Query the server to obtain the total writes since the last - * server counter reset, and compare this value with the supplied - * expected value. 
+ * Purpose: Query the server to obtain the total writes since the last + * server counter reset, and compare this value with the supplied + * expected value. * - * If the values match, return TRUE. + * If the values match, return TRUE. * - * If the values don't match, flag an error and return FALSE. + * If the values don't match, flag an error and return FALSE. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/6/10 + * Programmer: JRM -- 5/6/10 * *****************************************************************************/ static hbool_t -verify_total_writes(int expected_total_writes) +verify_total_writes(unsigned expected_total_writes) { - const char * fcn_name = "verify_total_writes()"; - hbool_t success = TRUE; /* will set to FALSE if appropriate. */ - long reported_total_writes; + hbool_t success = TRUE; /* will set to FALSE if appropriate. */ + unsigned reported_total_writes; struct mssg_t mssg; - if ( success ) { + if (success) { /* compose the message */ mssg.req = REQ_TTL_WRITES_CODE; @@ -4700,67 +4552,59 @@ verify_total_writes(int expected_total_writes) mssg.count = 0; mssg.magic = MSSG_MAGIC; - if ( ! send_mssg(&mssg, FALSE) ) { + if (!send_mssg(&mssg, FALSE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed.\n", world_mpi_rank, __func__); } } } - if ( success ) { + if (success) { - if ( ! recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE) ) { + if (!recv_mssg(&mssg, REQ_TTL_WRITES_RPLY_CODE)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); - } - } else if ( ( mssg.req != REQ_TTL_WRITES_RPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != 0 ) || - ( mssg.len != 0 ) || - ( mssg.ver != 0 ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); + } + } + else if ((mssg.req != REQ_TTL_WRITES_RPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != 0) || (mssg.len != 0) || + (mssg.ver != 0) || (mssg.magic != MSSG_MAGIC)) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in req total reads reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in req total writes reply.\n", world_mpi_rank, __func__); } - } else { + } + else { reported_total_writes = mssg.count; } } - if ( success ) { + if (success) { - if ( reported_total_writes != expected_total_writes ) { + if (reported_total_writes != expected_total_writes) { nerrors++; success = FALSE; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: reported/expected total writes mismatch (%ld/%ld).\n", - world_mpi_rank, fcn_name, - reported_total_writes, expected_total_writes); + if (verbose) { + HDfprintf(stdout, "%d:%s: reported/expected total writes mismatch (%u/%u).\n", world_mpi_rank, + __func__, reported_total_writes, expected_total_writes); } } } - return(success); + return (success); } /* verify_total_writes() */ - /***************************************************************************** * Function: unlock_entry() * * @@ -4775,71 +4619,60 @@ verify_total_writes(int expected_total_writes) * * Modifications: * - * 7/11/06 - * Updated for the new local_len field in datum.
+ * 7/11/06 + * Updated for the new local_len field in datum. * *****************************************************************************/ - -void -unlock_entry(H5F_t * file_ptr, - int32_t idx, - unsigned int flags) +static void +unlock_entry(H5F_t *file_ptr, int32_t idx, unsigned int flags) { - const char * fcn_name = "unlock_entry()"; - herr_t dirtied; - herr_t result; - struct datum * entry_ptr; + herr_t dirtied; + herr_t result; + struct datum *entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( entry_ptr->locked ); + HDassert(entry_ptr->locked); - dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG ); + dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG); - if ( dirtied ) { + if (dirtied) { (entry_ptr->ver)++; entry_ptr->dirty = TRUE; } - result = H5AC_unprotect(file_ptr, H5P_DATASET_XFER_DEFAULT, &(types[0]), - entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags); + result = H5AC_unprotect(file_ptr, &(types[0]), entry_ptr->base_addr, (void *)(&(entry_ptr->header)), + flags); - if ( ( result < 0 ) || - ( entry_ptr->header.type != &(types[0]) ) || - ( ( entry_ptr->len != entry_ptr->header.size ) && - ( entry_ptr->local_len != entry_ptr->header.size ) ) || - ( entry_ptr->base_addr != entry_ptr->header.addr ) ) { + if ((result < 0) || (entry_ptr->header.type != &(types[0])) || + ((entry_ptr->len != entry_ptr->header.size) && + (entry_ptr->local_len != entry_ptr->header.size)) || + (entry_ptr->base_addr != entry_ptr->header.addr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: error in H5C_unprotect().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: error in H5AC_unprotect().\n", world_mpi_rank, __func__); } - } else { + } + else { entry_ptr->locked = FALSE; + } - } - - HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE ); + HDassert(((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE); - if ( ( (flags & H5AC__DIRTIED_FLAG) != 0 ) && - ( (flags & H5C__DELETED_FLAG) == 0 ) && - ( ! ( ( ( world_mpi_rank == 0 ) && ( entry_ptr->flushed ) ) - || - ( ( world_mpi_rank != 0 ) && ( entry_ptr->cleared ) ) - ) - ) - ) { - HDassert( entry_ptr->header.is_dirty ); - HDassert( entry_ptr->dirty ); + if (((flags & H5AC__DIRTIED_FLAG) != 0) && ((flags & H5C__DELETED_FLAG) == 0) && + (!(((world_mpi_rank == 0) && (entry_ptr->flushed)) || + ((world_mpi_rank != 0) && (entry_ptr->cleared))))) { + HDassert(entry_ptr->header.is_dirty); + HDassert(entry_ptr->dirty); } } @@ -4847,7 +4680,6 @@ unlock_entry(H5F_t * file_ptr, } /* unlock_entry() */ - /***************************************************************************** * Function: unpin_entry() * @@ -4862,113 +4694,100 @@ unlock_entry(H5F_t * file_ptr, * * Modifications: * - * JRM -- 8/15/06 - * Added assertion that entry is pinned on entry. + * JRM -- 8/15/06 + * Added assertion that entry is pinned on entry. 
* *****************************************************************************/ - static void -unpin_entry(H5F_t * file_ptr, - int32_t idx, - hbool_t global, - hbool_t dirty, - hbool_t via_unprotect) +unpin_entry(H5F_t *file_ptr, int32_t idx, hbool_t global, hbool_t dirty, hbool_t via_unprotect) { - const char * fcn_name = "unpin_entry()"; - herr_t result; - unsigned int flags = H5AC__UNPIN_ENTRY_FLAG; - struct datum * entry_ptr; + herr_t result; + unsigned int flags = H5AC__UNPIN_ENTRY_FLAG; + struct datum *entry_ptr; - if ( nerrors == 0 ) { + if (nerrors == 0) { - HDassert( file_ptr ); - HDassert( ( 0 <= idx ) && ( idx < NUM_DATA_ENTRIES ) ); - HDassert( idx < virt_num_data_entries ); + HDassert(file_ptr); + HDassert((0 <= idx) && (idx < NUM_DATA_ENTRIES)); + HDassert(idx < virt_num_data_entries); entry_ptr = &(data[idx]); - HDassert( (entry_ptr->header).is_pinned ); - HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) ); - HDassert ( ( global && entry_ptr->global_pinned ) || - ( ! global && entry_ptr->local_pinned ) ); - HDassert ( ! ( dirty && ( ! global ) ) ); - - if ( via_unprotect ) { + HDassert((entry_ptr->header).is_pinned); + HDassert(!(entry_ptr->global_pinned && entry_ptr->local_pinned)); + HDassert((global && entry_ptr->global_pinned) || (!global && entry_ptr->local_pinned)); + HDassert(!(dirty && (!global))); - lock_entry(file_ptr, idx); + if (via_unprotect) { - if ( dirty ) { + lock_entry(file_ptr, idx); - flags |= H5AC__DIRTIED_FLAG; - } + if (dirty) { - unlock_entry(file_ptr, idx, flags); - - } else { + flags |= H5AC__DIRTIED_FLAG; + } - if ( dirty ) { + unlock_entry(file_ptr, idx, flags); + } + else { - mark_entry_dirty(idx); + if (dirty) { - } + mark_entry_dirty(idx); + } - result = H5AC_unpin_entry(entry_ptr); + result = H5AC_unpin_entry(entry_ptr); - if ( result < 0 ) { + if (result < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n", world_mpi_rank, __func__); } - } - } - - HDassert( ! ((entry_ptr->header).is_pinned) ); - - if ( global ) { + } + } - entry_ptr->global_pinned = FALSE; + HDassert(!((entry_ptr->header).is_pinned)); - } else { + if (global) { - entry_ptr->local_pinned = FALSE; + entry_ptr->global_pinned = FALSE; + } + else { - } + entry_ptr->local_pinned = FALSE; + } } return; } /* unpin_entry() */ - /*****************************************************************************/ /****************************** test functions *******************************/ /*****************************************************************************/ - /***************************************************************************** * - * Function: server_smoke_check() + * Function: server_smoke_check() * - * Purpose: Quick smoke check for the server process. + * Purpose: Quick smoke check for the server process. 
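
unpin_entry() above drops a pin through one of two routes: either re-protect the entry and unprotect it with the unpin flag (adding the dirtied flag when requested), or mark the entry dirty and unpin it directly. A reduced standalone sketch of that branch structure, with hypothetical stubs in place of the metadata cache API:

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_UNPIN 0x1u
    #define FLAG_DIRTY 0x2u

    /* Hypothetical stand-ins for lock/unlock/mark/unpin on a cache entry */
    static void lock_stub(int idx)                   { printf("lock %d\n", idx); }
    static void unlock_stub(int idx, unsigned flags) { printf("unlock %d flags 0x%x\n", idx, flags); }
    static void mark_dirty_stub(int idx)             { printf("dirty %d\n", idx); }
    static void direct_unpin_stub(int idx)           { printf("unpin %d\n", idx); }

    /* Two ways to drop a pin, mirroring unpin_entry()'s branch structure */
    static void
    unpin_sketch(int idx, bool dirty, bool via_unprotect)
    {
        if (via_unprotect) {
            unsigned flags = FLAG_UNPIN;

            lock_stub(idx);
            if (dirty)
                flags |= FLAG_DIRTY;
            unlock_stub(idx, flags); /* unpin rides along with the unprotect */
        }
        else {
            if (dirty)
                mark_dirty_stub(idx);
            direct_unpin_stub(idx);  /* unpin without protecting first */
        }
    }

    int
    main(void)
    {
        unpin_sketch(3, true, true);
        unpin_sketch(7, false, false);
        return 0;
    }
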
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 12/21/05 + * Programmer: JRM -- 12/21/05 * *****************************************************************************/ static hbool_t server_smoke_check(void) { - const char * fcn_name = "server_smoke_check()"; - hbool_t success = TRUE; - int max_nerrors; + hbool_t success = TRUE; + int max_nerrors; struct mssg_t mssg; - if ( world_mpi_rank == 0 ) { + if (world_mpi_rank == 0) { TESTING("server smoke check"); } @@ -4977,15 +4796,14 @@ server_smoke_check(void) init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } } @@ -4997,102 +4815,94 @@ server_smoke_check(void) mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ mssg.base_addr = data[world_mpi_rank].base_addr; - mssg.len = data[world_mpi_rank].len; - mssg.ver = ++(data[world_mpi_rank].ver); - mssg.count = 0; - mssg.magic = MSSG_MAGIC; + H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t); + mssg.ver = ++(data[world_mpi_rank].ver); + mssg.count = 0; + mssg.magic = MSSG_MAGIC; - if ( ! ( success = send_mssg(&mssg, FALSE) ) ) { + if (!(success = send_mssg(&mssg, FALSE))) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, __func__); } } #if DO_WRITE_REQ_ACK /* try to receive the write ack from the server */ - if ( success ) { + if (success) { success = recv_mssg(&mssg, WRITE_REQ_ACK_CODE); - if ( ! 
success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); } } } /* verify that we received the expected ack message */ - if ( success ) { + if (success) { - if ( ( mssg.req != WRITE_REQ_ACK_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != data[world_mpi_rank].base_addr ) || - ( mssg.len != data[world_mpi_rank].len ) || - ( mssg.ver != data[world_mpi_rank].ver ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if ((mssg.req != WRITE_REQ_ACK_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != data[world_mpi_rank].base_addr) || + (mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) || + (mssg.magic != MSSG_MAGIC)) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in write req ack.\n", world_mpi_rank, __func__); } } } #endif /* DO_WRITE_REQ_ACK */ - do_sync(); + do_sync(); - /* barrier to allow all writes to complete */ - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + /* barrier to allow all writes to complete */ + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: barrier 1 failed.\n", world_mpi_rank, __func__); } } /* verify that the expected entries have been written, the total */ - if ( success ) { + if (success) { success = verify_entry_writes(data[world_mpi_rank].base_addr, 1); - } + } - if ( success ) { + if (success) { success = verify_entry_reads(data[world_mpi_rank].base_addr, 0); - } + } - if ( success ) { + if (success) { - success = verify_total_writes(world_mpi_size - 1); + success = verify_total_writes((unsigned)(world_mpi_size - 1)); } - if ( success ) { + if (success) { success = verify_total_reads(0); } - /* barrier to allow all writes to complete */ - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + /* barrier to allow all writes to complete */ + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { + if (verbose) { - HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: barrier 2 failed.\n", world_mpi_rank, __func__); } } @@ -5102,149 +4912,139 @@ server_smoke_check(void) mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ mssg.base_addr = data[world_mpi_rank].base_addr; - mssg.len = data[world_mpi_rank].len; - mssg.ver = 0; /* bogus -- should be corrected by server */ - mssg.count = 0; - mssg.magic = MSSG_MAGIC; + H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t); + mssg.ver = 0; /* bogus -- should be corrected by server */ + mssg.count = 0; + mssg.magic = MSSG_MAGIC; - if ( success ) { + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! 
success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on write.\n", world_mpi_rank, __func__); } } } /* try to receive the reply from the server */ - if ( success ) { + if (success) { success = recv_mssg(&mssg, READ_REQ_REPLY_CODE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n", world_mpi_rank, __func__); } } } /* verify that we got the expected result */ - if ( success ) { + if (success) { - if ( ( mssg.req != READ_REQ_REPLY_CODE ) || - ( mssg.src != world_server_mpi_rank ) || - ( mssg.dest != world_mpi_rank ) || - ( mssg.base_addr != data[world_mpi_rank].base_addr ) || - ( mssg.len != data[world_mpi_rank].len ) || - ( mssg.ver != data[world_mpi_rank].ver ) || - ( mssg.magic != MSSG_MAGIC ) ) { + if ((mssg.req != READ_REQ_REPLY_CODE) || (mssg.src != world_server_mpi_rank) || + (mssg.dest != world_mpi_rank) || (mssg.base_addr != data[world_mpi_rank].base_addr) || + (mssg.len != data[world_mpi_rank].len) || (mssg.ver != data[world_mpi_rank].ver) || + (mssg.magic != MSSG_MAGIC)) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Bad data in read req reply.\n", world_mpi_rank, __func__); } } } - /* barrier to allow all writes to complete */ - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + /* barrier to allow all writes to complete */ + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: barrier 3 failed.\n", world_mpi_rank, __func__); } } /* verify that the expected entries have been read, and the total */ - if ( success ) { + if (success) { success = verify_entry_writes(data[world_mpi_rank].base_addr, 1); - } + } - if ( success ) { + if (success) { success = verify_entry_reads(data[world_mpi_rank].base_addr, 1); - } + } - if ( success ) { + if (success) { - success = verify_total_writes(world_mpi_size - 1); + success = verify_total_writes((unsigned)(world_mpi_size - 1)); } - if ( success ) { + if (success) { success = verify_total_reads(world_mpi_size - 1); } - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { + if (verbose) { - HDfprintf(stdout, "%d:%s: barrier 4 failed.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: barrier 4 failed.\n", world_mpi_rank, __func__); } } /* reset the counters */ - if ( success ) { + if (success) { success = reset_server_counts(); } - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { + if (verbose) { - HDfprintf(stdout, "%d:%s: barrier 5 failed.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: barrier 5 failed.\n", world_mpi_rank, __func__); } } /* verify that the counters have been reset */ - if ( success ) { + if (success) { success = verify_entry_writes(data[world_mpi_rank].base_addr, 0); - } + } - if ( success ) { + if (success) { success = verify_entry_reads(data[world_mpi_rank].base_addr, 0); - } + } - if ( 
success ) { + if (success) { success = verify_total_writes(0); } - if ( success ) { + if (success) { success = verify_total_reads(0); } - if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) { + if (MPI_SUCCESS != MPI_Barrier(file_mpi_comm)) { success = FALSE; nerrors++; - if ( verbose ) { + if (verbose) { - HDfprintf(stdout, "%d:%s: barrier 6 failed.\n", - world_mpi_rank, fcn_name); + HDfprintf(stdout, "%d:%s: barrier 6 failed.\n", world_mpi_rank, __func__); } } @@ -5253,22 +5053,21 @@ server_smoke_check(void) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ mssg.count = 0; mssg.magic = MSSG_MAGIC; - if ( success ) { + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } @@ -5276,137 +5075,127 @@ server_smoke_check(void) max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { + if (world_mpi_rank == 0) { - if ( max_nerrors == 0 ) { + if (max_nerrors == 0) { - PASSED(); - - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* server_smoke_check() */ - /***************************************************************************** * - * Function: smoke_check_1() + * Function: smoke_check_1() * - * Purpose: First smoke check for the parallel cache. + * Purpose: First smoke check for the parallel cache. 
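
All of the smoke checks that follow share one skeleton: the designated server rank runs server_main() and nothing else, while the client ranks set up the cache, exercise entries, tear the cache down, and finally send the server a done message so it can exit its service loop. A stripped-down sketch of that control flow (the rank handling and helper names below are hypothetical stubs, not the test's real MPI code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stubs standing in for the test's server/client helpers */
    static bool server_main_stub(void)      { return true; }
    static bool setup_cache_stub(void)      { return true; }
    static void exercise_entries_stub(void) { printf("exercise entries\n"); }
    static bool teardown_cache_stub(void)   { return true; }
    static void send_done_stub(void)        { printf("send done\n"); }

    /* The shared smoke-check skeleton: one server rank, N client ranks */
    static bool
    smoke_check_skeleton(int my_rank, int server_rank)
    {
        int nerrors = 0;

        if (my_rank == server_rank) {
            if (!server_main_stub()) /* serve read/write requests until done */
                nerrors++;
        }
        else {
            if (!setup_cache_stub())
                nerrors++;
            exercise_entries_stub(); /* insert / lock / unlock / move ... */
            if (!teardown_cache_stub())
                nerrors++;
            send_done_stub();        /* let the server exit its loop */
        }
        return nerrors == 0;
    }

    int
    main(void)
    {
        printf("ok = %d\n", smoke_check_skeleton(1, 0));
        return 0;
    }
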
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/4/06 + * Programmer: JRM -- 1/4/06 * *****************************************************************************/ static hbool_t smoke_check_1(int metadata_write_strategy) { - const char * fcn_name = "smoke_check_1()"; - hbool_t success = TRUE; - int i; - int max_nerrors; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + hbool_t success = TRUE; + int i; + int max_nerrors; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; struct mssg_t mssg; - switch ( metadata_write_strategy ) { + switch (metadata_write_strategy) { - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #1 -- process 0 only md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #1 -- process 0 only md write strategy"); } - break; + break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #1 -- distributed md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #1 -- distributed md write strategy"); } - break; + break; default: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #1 -- unknown md write strategy"); + if (world_mpi_rank == 0) { + TESTING("smoke check #1 -- unknown md write strategy"); } - break; + break; } nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } } else /* run the clients */ { - if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } } - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { + for (i = 0; i < (virt_num_data_entries / 2); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); } - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); } /* Move the first half of the entries... */ - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + for (i = 0; i < (virt_num_data_entries / 2); i++) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); } /* ...and then move them back. 
*/ - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); } - if ( fid >= 0 ) { + if (fid >= 0) { - if ( ! take_down_cache(fid) ) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } } } @@ -5415,10 +5204,9 @@ smoke_check_1(int metadata_write_strategy) * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -5426,22 +5214,21 @@ smoke_check_1(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } @@ -5449,187 +5236,164 @@ smoke_check_1(int metadata_write_strategy) max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { - - if ( max_nerrors == 0 ) { + if (world_mpi_rank == 0) { - PASSED(); + if (max_nerrors == 0) { - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* smoke_check_1() */ - /***************************************************************************** * - * Function: smoke_check_2() + * Function: smoke_check_2() * - * Purpose: Second smoke check for the parallel cache. + * Purpose: Second smoke check for the parallel cache. * - * Introduce random reads, but keep all processes with roughly - * the same work load. + * Introduce random reads, but keep all processes with roughly + * the same work load. 
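
A side note on the move pattern used by smoke_check_1 above and repeated below: applying move_entry(i, i + n/2) over the first half of the index space and then issuing the same calls again returns every entry to its starting slot, which is exactly what the closing data_index[i] == i assertions rely on. A toy standalone illustration of that round trip, using plain array swaps instead of the real cache calls:

    #include <assert.h>
    #include <stdio.h>

    #define N 8 /* toy entry count; must be even */

    /* Swap the slots of entries i and j, mirroring move_entry()'s
     * index bookkeeping in miniature.
     */
    static void
    move_toy(int index[], int i, int j)
    {
        int tmp  = index[i];
        index[i] = index[j];
        index[j] = tmp;
    }

    int
    main(void)
    {
        int index[N];
        int i;

        for (i = 0; i < N; i++)
            index[i] = i;

        /* move the first half forward... */
        for (i = 0; i < N / 2; i++)
            move_toy(index, i, i + N / 2);

        /* ...and then move them back (same disjoint swaps, so the
         * second pass undoes the first regardless of loop direction)
         */
        for (i = N / 2 - 1; i >= 0; i--)
            move_toy(index, i, i + N / 2);

        /* every entry is back where it started */
        for (i = 0; i < N; i++)
            assert(index[i] == i);

        printf("round trip ok\n");
        return 0;
    }
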
* - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/12/06 + * Programmer: JRM -- 1/12/06 * *****************************************************************************/ static hbool_t smoke_check_2(int metadata_write_strategy) { - const char * fcn_name = "smoke_check_2()"; - hbool_t success = TRUE; - int i; - int max_nerrors; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + hbool_t success = TRUE; + int i; + int max_nerrors; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; struct mssg_t mssg; - switch ( metadata_write_strategy ) { + switch (metadata_write_strategy) { - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #2 -- process 0 only md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #2 -- process 0 only md write strategy"); } - break; + break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #2 -- distributed md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #2 -- distributed md write strategy"); } - break; + break; default: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #2 -- unknown md write strategy"); + if (world_mpi_rank == 0) { + TESTING("smoke check #2 -- unknown md write strategy"); } - break; + break; } nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } } else /* run the clients */ { - if ( ! 
setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } } - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { + for (i = 0; i < (virt_num_data_entries / 2); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10); } } - for ( i = 0; i < (virt_num_data_entries / 2); i+=61 ) - { - /* Make sure we don't step on any locally pinned entries */ - if ( data[i].local_pinned ) { - unpin_entry(file_ptr, i, FALSE, FALSE, FALSE); - } + for (i = 0; i < (virt_num_data_entries / 2); i += 61) { + /* Make sure we don't step on any locally pinned entries */ + if (data[i].local_pinned) { + unpin_entry(file_ptr, i, FALSE, FALSE, FALSE); + } - pin_entry(file_ptr, i, TRUE, FALSE); - } + pin_entry(file_ptr, i, TRUE, FALSE); + } - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 20), - 0, 100); - local_pin_and_unpin_random_entries(file_ptr, 0, - (virt_num_data_entries / 4), - 0, 3); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i -= 2) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), 0, 100); + local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 4), 0, 3); } - for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 10), - 0, 100); + for (i = 0; i < (virt_num_data_entries / 2); i += 2) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 10), 0, 100); } - /* we can't move pinned entries, so release any local pins now. */ - local_unpin_all_entries(file_ptr, FALSE); + /* we can't move pinned entries, so release any local pins now. */ + local_unpin_all_entries(file_ptr, FALSE); /* Move the first half of the entries... */ - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - ((virt_num_data_entries / 50) - 1), - 0, 100); + for (i = 0; i < (virt_num_data_entries / 2); i++) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, ((virt_num_data_entries / 50) - 1), 0, 100); } /* ...and then move them back. 
*/ - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 100), - 0, 100); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 100), 0, 100); } - for ( i = 0; i < (virt_num_data_entries / 2); i+=61 ) - { - hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 ); - hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 ); + for (i = 0; i < (virt_num_data_entries / 2); i += 61) { + hbool_t via_unprotect = ((((unsigned)i) & 0x01) == 0); + hbool_t dirty = ((((unsigned)i) & 0x02) == 0); - unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect); - } + unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect); + } - if ( fid >= 0 ) { + if (fid >= 0) { - if ( ! take_down_cache(fid) ) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } } } @@ -5638,10 +5402,9 @@ smoke_check_2(int metadata_write_strategy) * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -5649,22 +5412,21 @@ smoke_check_2(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } @@ -5672,258 +5434,201 @@ smoke_check_2(int metadata_write_strategy) max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { + if (world_mpi_rank == 0) { - if ( max_nerrors == 0 ) { + if (max_nerrors == 0) { - PASSED(); - - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* smoke_check_2() */ - /***************************************************************************** * - * Function: smoke_check_3() + * Function: smoke_check_3() * - * Purpose: Third smoke check for the parallel cache. + * Purpose: Third smoke check for the parallel cache. * - * Use random reads to vary the loads on the diffferent - * processors. Also force different cache size adjustments. + * Use random reads to vary the loads on the different + * processors. Also force different cache size adjustments. * - * In this test, load process 0 heavily, and the other - * processes lightly. 
+ * In this test, load process 0 heavily, and the other + * processes lightly. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/13/06 + * Programmer: JRM -- 1/13/06 * *****************************************************************************/ static hbool_t smoke_check_3(int metadata_write_strategy) { - const char * fcn_name = "smoke_check_3()"; - hbool_t success = TRUE; - int cp = 0; - int i; - int max_nerrors; - int min_count; - int max_count; - int min_idx; - int max_idx; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + hbool_t success = TRUE; + int i; + int max_nerrors; + int min_count; + int max_count; + int min_idx; + int max_idx; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; struct mssg_t mssg; - switch ( metadata_write_strategy ) { + switch (metadata_write_strategy) { - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #3 -- process 0 only md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #3 -- process 0 only md write strategy"); } - break; + break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #3 -- distributed md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #3 -- distributed md write strategy"); } - break; + break; default: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #3 -- unknown md write strategy"); + if (world_mpi_rank == 0) { + TESTING("smoke check #3 -- unknown md write strategy"); } - break; + break; } - /* 0 */ - if ( verbose ) { HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); } - nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { - - /* 1 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } - - /* 2 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} } else /* run the clients */ { - /* 1 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - - if ( ! 
setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } } - /* 2 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - min_count = 100 / ((file_mpi_rank + 1) * (file_mpi_rank + 1)); max_count = min_count + 50; - for ( i = 0; i < (virt_num_data_entries / 4); i++ ) - { + for (i = 0; i < (virt_num_data_entries / 4); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); } } - /* 3 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - - min_count = 100 / ((file_mpi_rank + 2) * (file_mpi_rank + 2)); max_count = min_count + 50; - for ( i = (virt_num_data_entries / 4); - i < (virt_num_data_entries / 2); - i++ ) - { + for (i = (virt_num_data_entries / 4); i < (virt_num_data_entries / 2); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - if ( i % 59 == 0 ) { + if (i % 59 == 0) { - hbool_t dirty = ( (i % 2) == 0); + hbool_t dirty = ((i % 2) == 0); - if ( data[i].local_pinned ) { - unpin_entry(file_ptr, i, FALSE, FALSE, FALSE); - } - - pin_entry(file_ptr, i, TRUE, dirty); - - HDassert( !dirty || data[i].header.is_dirty ); - HDassert( data[i].header.is_pinned ); - HDassert( data[i].global_pinned ); - HDassert( ! 
data[i].local_pinned ); - } + if (data[i].local_pinned) { + unpin_entry(file_ptr, i, FALSE, FALSE, FALSE); + } - if ( i > 100 ) { + pin_entry(file_ptr, i, TRUE, dirty); - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + HDassert(!dirty || data[i].header.is_dirty); + HDassert(data[i].header.is_pinned); + HDassert(data[i].global_pinned); + HDassert(!data[i].local_pinned); } - local_pin_and_unpin_random_entries(file_ptr, 0, - virt_num_data_entries / 4, - 0, (file_mpi_rank + 2)); - - } + if (i > 100) { - /* 4 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); + } + local_pin_and_unpin_random_entries(file_ptr, 0, virt_num_data_entries / 4, 0, + (file_mpi_rank + 2)); + } - /* flush the file to be sure that we have no problems flushing - * pinned entries - */ - if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + /* flush the file to be sure that we have no problems flushing + * pinned entries + */ + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } } - /* 5 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - - min_idx = 0; - max_idx = ((virt_num_data_entries / 10) / - ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1; - if ( max_idx <= min_idx ) { + max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 1) * (file_mpi_rank + 1))) - 1; + if (max_idx <= min_idx) { max_idx = min_idx + 10; } - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) { + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) { - hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 ); - hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 ); + hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0); + hbool_t dirty = ((((unsigned)i) & 0x04) == 0); - HDassert( data[i].global_pinned ); - HDassert( ! 
data[i].local_pinned ); + HDassert(data[i].global_pinned); + HDassert(!data[i].local_pinned); - unpin_entry(file_ptr, i, TRUE, dirty, - via_unprotect); - } - if ( i % 2 == 0 ) { + unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect); + } + if (i % 2 == 0) { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - local_pin_and_unpin_random_entries(file_ptr, 0, - virt_num_data_entries / 2, - 0, 2); - lock_and_unlock_random_entries(file_ptr, - min_idx, max_idx, 0, 100); - } + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + local_pin_and_unpin_random_entries(file_ptr, 0, virt_num_data_entries / 2, 0, 2); + lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100); + } } - /* 6 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - min_idx = 0; - max_idx = ((virt_num_data_entries / 10) / - ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1; - if ( max_idx <= min_idx ) { + max_idx = ((virt_num_data_entries / 10) / ((file_mpi_rank + 3) * (file_mpi_rank + 3))) - 1; + if (max_idx <= min_idx) { max_idx = min_idx + 10; } - for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - lock_and_unlock_random_entries(file_ptr, - min_idx, max_idx, 0, 100); + for (i = 0; i < (virt_num_data_entries / 2); i += 2) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100); } - /* 7 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - /* we can't move pinned entries, so release any local pins now. */ local_unpin_all_entries(file_ptr, FALSE); @@ -5931,84 +5636,60 @@ smoke_check_3(int metadata_write_strategy) max_count = min_count + 100; /* move the first half of the entries... */ - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 20), - min_count, max_count); + for (i = 0; i < (virt_num_data_entries / 2); i++) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), min_count, max_count); } - /* 8 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - /* ...and then move them back. */ - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 40), - min_count, max_count); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 40), min_count, max_count); } - /* 9 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - /* finally, do some dirty lock/unlocks while we give the cache * a chance to reduce its size.
*/ min_count = 200 / ((file_mpi_rank + 1) * (file_mpi_rank + 1)); max_count = min_count + 100; - for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) - { - local_pin_and_unpin_random_entries(file_ptr, 0, - (virt_num_data_entries / 2), - 0, 5); + for (i = 0; i < (virt_num_data_entries / 2); i += 2) { + local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 2), 0, 5); - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); } } - /* 10 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - /* release any local pins before we take down the cache. */ local_unpin_all_entries(file_ptr, FALSE); - if ( fid >= 0 ) { + if (fid >= 0) { - if ( ! take_down_cache(fid) ) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } } } - /* 11 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} - /* verify that all instances of datum are back where they started * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -6016,280 +5697,247 @@ smoke_check_3(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { - + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } - - /* 12 */ - if ( verbose ) {HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++);} } max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { - - if ( max_nerrors == 0 ) { + if (world_mpi_rank == 0) { - PASSED(); + if (max_nerrors == 0) { - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* smoke_check_3() */ - /***************************************************************************** * - * Function: smoke_check_4() + * Function: smoke_check_4() * - * Purpose: Fourth smoke check for the parallel cache. + * Purpose: Fourth smoke check for the parallel cache. * - * Use random reads to vary the loads on the diffferent - * processors. Also force different cache size adjustments. + * Use random reads to vary the loads on the different + * processors. Also force different cache size adjustments.
* - * In this test, load process 0 lightly, and the other - * processes heavily. + * In this test, load process 0 lightly, and the other + * processes heavily. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 1/13/06 + * Programmer: JRM -- 1/13/06 * *****************************************************************************/ static hbool_t smoke_check_4(int metadata_write_strategy) { - const char * fcn_name = "smoke_check_4()"; - hbool_t success = TRUE; - int i; - int max_nerrors; - int min_count; - int max_count; - int min_idx; - int max_idx; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + hbool_t success = TRUE; + int i; + int max_nerrors; + int min_count; + int max_count; + int min_idx; + int max_idx; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; struct mssg_t mssg; - switch ( metadata_write_strategy ) { + switch (metadata_write_strategy) { - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #4 -- process 0 only md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #4 -- process 0 only md write strategy"); } - break; + break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #4 -- distributed md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #4 -- distributed md write strategy"); } - break; + break; default: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #4 -- unknown md write strategy"); + if (world_mpi_rank == 0) { + TESTING("smoke check #4 -- unknown md write strategy"); } - break; + break; } nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } } else /* run the clients */ { - if ( ! 
setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } } - min_count = 100 * (file_mpi_rank % 4); max_count = min_count + 50; - for ( i = 0; i < (virt_num_data_entries / 4); i++ ) - { + for (i = 0; i < (virt_num_data_entries / 4); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); } } min_count = 10 * (file_mpi_rank % 4); max_count = min_count + 100; - for ( i = (virt_num_data_entries / 4); - i < (virt_num_data_entries / 2); - i++ ) - { - if ( i % 2 == 0 ) { + for (i = (virt_num_data_entries / 4); i < (virt_num_data_entries / 2); i++) { + if (i % 2 == 0) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + } + else { - } else { - - /* Insert some entries pinned, and then unpin them - * immediately. We have tested pinned entries elsewhere, - * so it should be sufficient to verify that the - * entries are in fact pinned (which unpin_entry() should do). - */ + /* Insert some entries pinned, and then unpin them + * immediately. We have tested pinned entries elsewhere, + * so it should be sufficient to verify that the + * entries are in fact pinned (which unpin_entry() should do). + */ insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG); unpin_entry(file_ptr, i, TRUE, FALSE, FALSE); - } + } - if ( i % 59 == 0 ) { + if (i % 59 == 0) { - hbool_t dirty = ( (i % 2) == 0); + hbool_t dirty = ((i % 2) == 0); - if ( data[i].local_pinned ) { + if (data[i].local_pinned) { unpin_entry(file_ptr, i, FALSE, FALSE, FALSE); } pin_entry(file_ptr, i, TRUE, dirty); - HDassert( !dirty || data[i].header.is_dirty ); - HDassert( data[i].header.is_pinned ); - HDassert( data[i].global_pinned ); - HDassert( ! 
data[i].local_pinned ); + HDassert(!dirty || data[i].header.is_dirty); + HDassert(data[i].header.is_pinned); + HDassert(data[i].global_pinned); + HDassert(!data[i].local_pinned); } - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); } - local_pin_and_unpin_random_entries(file_ptr, 0, - (virt_num_data_entries / 4), - 0, (file_mpi_rank + 2)); + local_pin_and_unpin_random_entries(file_ptr, 0, (virt_num_data_entries / 4), 0, + (file_mpi_rank + 2)); } - /* flush the file to be sure that we have no problems flushing - * pinned entries - */ - if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + * pinned entries + */ + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } } - min_idx = 0; - max_idx = (((virt_num_data_entries / 10) / 4) * - ((file_mpi_rank % 4) + 1)) - 1; + max_idx = (((virt_num_data_entries / 10) / 4) * ((file_mpi_rank % 4) + 1)) - 1; - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) { + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + if ((i >= (virt_num_data_entries / 4)) && (i % 59 == 0)) { - hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 ); - hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 ); + hbool_t via_unprotect = ((((unsigned)i) & 0x02) == 0); + hbool_t dirty = ((((unsigned)i) & 0x04) == 0); - HDassert( data[i].global_pinned ); - HDassert( ! data[i].local_pinned ); + HDassert(data[i].global_pinned); + HDassert(!data[i].local_pinned); unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect); } - if ( i % 2 == 0 ) { + if (i % 2 == 0) { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - lock_and_unlock_random_entries(file_ptr, - min_idx, max_idx, 0, 100); - } + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100); + } } min_idx = 0; - max_idx = (((virt_num_data_entries / 10) / 8) * - ((file_mpi_rank % 4) + 1)) - 1; + max_idx = (((virt_num_data_entries / 10) / 8) * ((file_mpi_rank % 4) + 1)) - 1; - for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - lock_and_unlock_random_entries(file_ptr, - min_idx, max_idx, 0, 100); + for (i = 0; i < (virt_num_data_entries / 2); i += 2) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + lock_and_unlock_random_entries(file_ptr, min_idx, max_idx, 0, 100); } - /* we can't move pinned entries, so release any local pins now. */ - local_unpin_all_entries(file_ptr, FALSE); + /* we can't move pinned entries, so release any local pins now. */ + local_unpin_all_entries(file_ptr, FALSE); min_count = 10 * (file_mpi_rank % 4); max_count = min_count + 100; /* move the first half of the entries... 
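[Editor's note] smoke_check_3 and smoke_check_4 both run the move/move-back passes that follow: each entry is relocated to slot `i + virt_num_data_entries / 2` on the way out and restored on the way back, which is why the end-of-test loop can assert `data_index[i] == i`. A condensed sketch of the two passes, minus the interleaved random lock traffic (`lock_entry`, `unlock_entry`, and `move_entry` are the test harness's helpers):

/* Forward pass moves entries out with clean unlocks; the reverse pass
 * moves them back with dirty unlocks. move_entry() exchanges the two
 * indexed entries in the test's virtual file, so running both passes
 * restores the original layout. */
static void
move_entries_round_trip(H5F_t *file_ptr, int n)
{
    int i;

    for (i = 0; i < n / 2; i++) {
        lock_entry(file_ptr, i);
        unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
        move_entry(file_ptr, i, i + (n / 2));
    }

    for (i = (n / 2) - 1; i >= 0; i--) {
        lock_entry(file_ptr, i);
        unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
        move_entry(file_ptr, i, i + (n / 2));
    }
}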
*/ - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 20), - min_count, max_count); + for (i = 0; i < (virt_num_data_entries / 2); i++) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 20), min_count, max_count); } /* ...and then move them back. */ - for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); - lock_and_unlock_random_entries(file_ptr, 0, - (virt_num_data_entries / 40), - min_count, max_count); + for (i = (virt_num_data_entries / 2) - 1; i >= 0; i--) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + move_entry(file_ptr, i, (i + (virt_num_data_entries / 2))); + lock_and_unlock_random_entries(file_ptr, 0, (virt_num_data_entries / 40), min_count, max_count); } /* finally, do some dirty lock/unlocks while we give the cache @@ -6298,26 +5946,23 @@ smoke_check_4(int metadata_write_strategy) min_count = 100 * (file_mpi_rank % 4); max_count = min_count + 100; - for ( i = 0; i < (virt_num_data_entries / 2); i+=2 ) - { - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + for (i = 0; i < (virt_num_data_entries / 2); i += 2) { + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - if ( i > 100 ) { + if (i > 100) { - lock_and_unlock_random_entries(file_ptr, (i - 100), i, - min_count, max_count); + lock_and_unlock_random_entries(file_ptr, (i - 100), i, min_count, max_count); } } - if ( fid >= 0 ) { + if (fid >= 0) { - if ( ! take_down_cache(fid) ) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } } } @@ -6326,10 +5971,9 @@ smoke_check_4(int metadata_write_strategy) * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -6337,23 +5981,21 @@ smoke_check_4(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { - + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! 
success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } @@ -6361,240 +6003,169 @@ smoke_check_4(int metadata_write_strategy) max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { + if (world_mpi_rank == 0) { - if ( max_nerrors == 0 ) { + if (max_nerrors == 0) { - PASSED(); - - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* smoke_check_4() */ - /***************************************************************************** * - * Function: smoke_check_5() + * Function: smoke_check_5() * - * Purpose: Similar to smoke check 1, but modified to verify that - * H5AC_mark_entry_dirty() works in the parallel case. + * Purpose: Similar to smoke check 1, but modified to verify that + * H5AC_mark_entry_dirty() works in the parallel case. * - * Return: Success: TRUE + * Return: Success: TRUE * - * Failure: FALSE + * Failure: FALSE * - * Programmer: JRM -- 5/18/06 + * Programmer: JRM -- 5/18/06 * *****************************************************************************/ static hbool_t smoke_check_5(int metadata_write_strategy) { - const char * fcn_name = "smoke_check_5()"; - hbool_t success = TRUE; - int cp = 0; - int i; - int max_nerrors; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; + hbool_t success = TRUE; + int i; + int max_nerrors; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; struct mssg_t mssg; - switch ( metadata_write_strategy ) { + switch (metadata_write_strategy) { - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #5 -- process 0 only md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #5 -- process 0 only md write strategy"); } - break; + break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #5 -- distributed md write strategy"); + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #5 -- distributed md write strategy"); } - break; + break; default: - if ( world_mpi_rank == 0 ) { - TESTING("smoke check #5 -- unknown md write strategy"); + if (world_mpi_rank == 0) { + TESTING("smoke check #5 -- unknown md write strategy"); } - break; + break; } - - /* 0 */ - if ( verbose ) { HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); } - nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - /* 1 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } - - if ( ! 
server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } - - /* 2 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } } else /* run the clients */ { - /* 1 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } - - if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } } - /* 2 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } - - for ( i = 0; i < (virt_num_data_entries / 2); i++ ) - { + for (i = 0; i < (virt_num_data_entries / 2); i++) { insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); } - /* 3 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } - - /* flush the file so we can lock known clean entries. */ - if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + /* flush the file so we can lock known clean entries. */ + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } } - /* 4 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } + for (i = 0; i < (virt_num_data_entries / 4); i++) { + lock_entry(file_ptr, i); - for ( i = 0; i < (virt_num_data_entries / 4); i++ ) - { - lock_entry(file_ptr, i); - - if ( i % 2 == 0 ) - { - mark_entry_dirty(i); - } - - unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); + if (i % 2 == 0) { + mark_entry_dirty(i); + } - if ( i % 2 == 1 ) - { - if ( i % 4 == 1 ) { + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); - lock_entry(file_ptr, i); - unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); - } + if (i % 2 == 1) { + if (i % 4 == 1) { - expunge_entry(file_ptr, i); - } - } + lock_entry(file_ptr, i); + unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG); + } - /* 5 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); + expunge_entry(file_ptr, i); + } } - for ( i = (virt_num_data_entries / 2) - 1; - i >= (virt_num_data_entries / 4); - i-- ) - { - pin_entry(file_ptr, i, TRUE, FALSE); + for (i = (virt_num_data_entries / 2) - 1; i >= (virt_num_data_entries / 4); i--) { + pin_entry(file_ptr, i, TRUE, FALSE); - if ( i % 2 == 0 ) - { - if ( i % 8 <= 4 ) { + if (i % 2 == 0) { + if (i % 8 <= 4) { - resize_entry(i, data[i].len / 2); - } + resize_entry(i, data[i].len / 2); + } mark_entry_dirty(i); - if ( i % 8 <= 4 ) { - - resize_entry(i, data[i].len); - } - } + if (i % 8 <= 4) { - unpin_entry(file_ptr, i, TRUE, FALSE, FALSE); - } + resize_entry(i, data[i].len); + } + } - /* 6 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); + unpin_entry(file_ptr, i, TRUE, FALSE, FALSE); } - if ( fid >= 0 ) { + if (fid >= 0) { - if ( ! 
take_down_cache(fid) ) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } } } - /* 7 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } - /* verify that all instance of datum are back where the started * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); - } - - /* 8 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -6602,57 +6173,50 @@ smoke_check_5(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! success ) { + if (!success) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); } } } - - /* 9 */ - if ( verbose ) { - HDfprintf(stderr, "%d: cp = %d\n", world_mpi_rank, cp++); - } } max_nerrors = get_max_nerrors(); - if ( world_mpi_rank == 0 ) { + if (world_mpi_rank == 0) { - if ( max_nerrors == 0 ) { + if (max_nerrors == 0) { - PASSED(); - - } else { + PASSED(); + } + else { failures++; H5_FAILED(); } } - success = ( ( success ) && ( max_nerrors == 0 ) ); + success = ((success) && (max_nerrors == 0)); - return(success); + return (success); } /* smoke_check_5() */ - /***************************************************************************** * - * Function: trace_file_check() + * Function: trace_file_check() * * Purpose: A basic test of the trace file capability. In essence, * we invoke all operations that generate trace file output, @@ -6680,13 +6244,11 @@ smoke_check_5(int metadata_write_strategy) * - H5AC_expunge_entry() * - H5AC_resize_entry() * - * This test is skipped if H5_METADATA_TRACE_FILE is undefined. 
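[Editor's note] The body of trace_file_check() below opens and later closes the trace file by round-tripping the cache's auto-resize configuration: fetch the config, flip the trace-file fields, and write it back. A minimal sketch of both directions, assuming the internal `H5AC_get_cache_auto_resize_config()`/`H5AC_set_cache_auto_resize_config()` calls the test uses (error paths collapsed to a single return):

/* Enable or disable metadata cache trace-file output on cache_ptr.
 * The version field must be set before the get call, as in the test. */
static herr_t
toggle_trace_file(H5C_t *cache_ptr, hbool_t enable, const char *name)
{
    H5AC_cache_config_t config;

    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED)
        return FAIL;

    if (enable) {
        config.open_trace_file = TRUE;
        strcpy(config.trace_file_name, name); /* e.g. "t_cache_trace.txt" */
    }
    else {
        config.open_trace_file    = FALSE;
        config.close_trace_file   = TRUE;
        config.trace_file_name[0] = '\0';
    }

    return H5AC_set_cache_auto_resize_config(cache_ptr, &config);
}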
+ * Return: Success: TRUE * - * Return: Success: TRUE + * Failure: FALSE * - * Failure: FALSE - * - * Programmer: JRM -- 6/13/06 + * Programmer: JRM -- 6/13/06 * *****************************************************************************/ static hbool_t @@ -6694,262 +6256,220 @@ trace_file_check(int metadata_write_strategy) { hbool_t success = TRUE; -#ifdef H5_METADATA_TRACE_FILE - - const char * fcn_name = "trace_file_check()"; - const char *((* expected_output)[]) = NULL; - const char * expected_output_0[] = - { - "### HDF5 metadata cache trace file version 1 ###\n", - "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 0 0\n", - "H5AC_insert_entry 0x200 25 0x0 2 0\n", - "H5AC_insert_entry 0x202 25 0x0 2 0\n", - "H5AC_insert_entry 0x204 25 0x0 4 0\n", - "H5AC_insert_entry 0x208 25 0x0 6 0\n", - "H5AC_protect 0x200 25 H5AC_WRITE 2 1\n", - "H5AC_mark_entry_dirty 0x200 0\n", - "H5AC_unprotect 0x200 25 0 0 0\n", - "H5AC_protect 0x202 25 H5AC_WRITE 2 1\n", - "H5AC_pin_protected_entry 0x202 0\n", - "H5AC_unprotect 0x202 25 0 0 0\n", - "H5AC_unpin_entry 0x202 0\n", - "H5AC_expunge_entry 0x202 25 0\n", - "H5AC_protect 0x204 25 H5AC_WRITE 4 1\n", - "H5AC_pin_protected_entry 0x204 0\n", - "H5AC_unprotect 0x204 25 0 0 0\n", - "H5AC_mark_entry_dirty 0x204 0 0 0\n", - "H5AC_resize_entry 0x204 2 0\n", - "H5AC_resize_entry 0x204 4 0\n", - "H5AC_unpin_entry 0x204 0\n", - "H5AC_move_entry 0x200 0x8c65 25 0\n", - "H5AC_move_entry 0x8c65 0x200 25 0\n", - "H5AC_flush 0\n", - NULL - }; - const char * expected_output_1[] = - { - "### HDF5 metadata cache trace file version 1 ###\n", - "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 1 0\n", - "H5AC_insert_entry 0x200 25 0x0 2 0\n", - "H5AC_insert_entry 0x202 25 0x0 2 0\n", - "H5AC_insert_entry 0x204 25 0x0 4 0\n", - "H5AC_insert_entry 0x208 25 0x0 6 0\n", - "H5AC_protect 0x200 25 H5AC_WRITE 2 1\n", - "H5AC_mark_entry_dirty 0x200 0\n", - "H5AC_unprotect 0x200 25 0 0 0\n", - "H5AC_protect 0x202 25 H5AC_WRITE 2 1\n", - "H5AC_pin_protected_entry 0x202 0\n", - "H5AC_unprotect 0x202 25 0 0 0\n", - "H5AC_unpin_entry 0x202 0\n", - "H5AC_expunge_entry 0x202 25 0\n", - "H5AC_protect 0x204 25 H5AC_WRITE 4 1\n", - "H5AC_pin_protected_entry 0x204 0\n", - "H5AC_unprotect 0x204 25 0 0 0\n", - "H5AC_mark_entry_dirty 0x204 0 0 0\n", - "H5AC_resize_pinned_entry 0x204 2 0\n", - "H5AC_resize_pinned_entry 0x204 4 0\n", - "H5AC_unpin_entry 0x204 0\n", - "H5AC_move_entry 0x200 0x8c65 25 0\n", - "H5AC_move_entry 0x8c65 0x200 25 0\n", - "H5AC_flush 0\n", - NULL - }; - char buffer[256]; - char trace_file_name[64]; - hbool_t done = FALSE; - int i; - int max_nerrors; - int expected_line_len; - int actual_line_len; - hid_t fid = -1; - H5F_t * file_ptr = NULL; - H5C_t * cache_ptr = NULL; - FILE * trace_file_ptr = NULL; + const char *((*expected_output)[]) = NULL; + const char *expected_output_0[] = {"### HDF5 metadata cache trace file version 1 ###\n", + "H5AC_set_cache_auto_resize_config", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_protect", + "H5AC_mark_entry_dirty", + "H5AC_unprotect", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_unpin_entry", + "H5AC_expunge_entry", + "H5AC_protect", + 
"H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_mark_entry_dirty", + "H5AC_resize_entry", + "H5AC_resize_entry", + "H5AC_unpin_entry", + "H5AC_move_entry", + "H5AC_move_entry", + "H5AC_flush", + "H5AC_flush", + NULL}; + const char *expected_output_1[] = {"### HDF5 metadata cache trace file version 1 ###\n", + "H5AC_set_cache_auto_resize_config", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_insert_entry", + "H5AC_protect", + "H5AC_mark_entry_dirty", + "H5AC_unprotect", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_unpin_entry", + "H5AC_expunge_entry", + "H5AC_protect", + "H5AC_pin_protected_entry", + "H5AC_unprotect", + "H5AC_mark_entry_dirty", + "H5AC_resize_entry", + "H5AC_resize_entry", + "H5AC_unpin_entry", + "H5AC_move_entry", + "H5AC_move_entry", + "H5AC_flush", + "H5AC_flush", + NULL}; + char buffer[256]; + char trace_file_name[64]; + hbool_t done = FALSE; + int i; + int max_nerrors; + size_t expected_line_len; + size_t actual_line_len; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + FILE *trace_file_ptr = NULL; H5AC_cache_config_t config; - struct mssg_t mssg; + struct mssg_t mssg; -#endif /* H5_METADATA_TRACE_FILE */ + switch (metadata_write_strategy) { - switch ( metadata_write_strategy ) { + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: - case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: -#ifdef H5_METADATA_TRACE_FILE expected_output = &expected_output_0; -#endif /* H5_METADATA_TRACE_FILE */ - if ( world_mpi_rank == 0 ) { - TESTING( - "trace file collection -- process 0 only md write strategy"); - } - break; - case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: -#ifdef H5_METADATA_TRACE_FILE + if (world_mpi_rank == 0) + TESTING("trace file collection -- process 0 only md write strategy"); + break; + + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + expected_output = &expected_output_1; -#endif /* H5_METADATA_TRACE_FILE */ - if ( world_mpi_rank == 0 ) { - TESTING( - "trace file collection -- distributed md write strategy"); - } - break; + + if (world_mpi_rank == 0) + TESTING("trace file collection -- distributed md write strategy"); + break; default: -#ifdef H5_METADATA_TRACE_FILE + /* this will almost certainly cause a failure, but it keeps us * from de-referenceing a NULL pointer. */ expected_output = &expected_output_0; -#endif /* H5_METADATA_TRACE_FILE */ - if ( world_mpi_rank == 0 ) { - TESTING("trace file collection -- unknown md write strategy"); - } - break; - } -#ifdef H5_METADATA_TRACE_FILE + if (world_mpi_rank == 0) + TESTING("trace file collection -- unknown md write strategy"); + break; + } /* end switch */ nerrors = 0; init_data(); reset_stats(); - if ( world_mpi_rank == world_server_mpi_rank ) { + if (world_mpi_rank == world_server_mpi_rank) { - if ( ! server_main() ) { + if (!server_main()) { - /* some error occured in the server -- report failure */ + /* some error occurred in the server -- report failure */ nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: server_main() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } - else /* run the clients */ - { + else { + /* run the clients */ - if ( ! 
setup_cache_for_test(&fid, &file_ptr, &cache_ptr, - metadata_write_strategy) ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { nerrors++; - fid = -1; + fid = -1; cache_ptr = NULL; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); } - if ( nerrors == 0 ) { + if (nerrors == 0) { config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { - - nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); - - } else { - + if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { + nerrors++; + HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, + __func__); + } + else { config.open_trace_file = TRUE; - strcpy(config.trace_file_name, "t_cache_trace.txt"); + strcpy(config.trace_file_name, "t_cache_trace.txt"); - if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { - - nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); + if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { + nerrors++; + HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, + __func__); } } - } + } /* end if */ - insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET); - insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET); - insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET); - insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET); + insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET); + insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET); + insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET); + insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET); - lock_entry(file_ptr, 0); - mark_entry_dirty(0); - unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET); + lock_entry(file_ptr, 0); + mark_entry_dirty(0); + unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET); - lock_entry(file_ptr, 1); + lock_entry(file_ptr, 1); pin_protected_entry(1, TRUE); - unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET); + unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET); unpin_entry(file_ptr, 1, TRUE, FALSE, FALSE); expunge_entry(file_ptr, 1); - lock_entry(file_ptr, 2); + lock_entry(file_ptr, 2); pin_protected_entry(2, TRUE); - unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET); - mark_entry_dirty(2); + unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET); + mark_entry_dirty(2); resize_entry(2, data[2].len / 2); resize_entry(2, data[2].len); unpin_entry(file_ptr, 2, TRUE, FALSE, FALSE); - move_entry(file_ptr, 0, 20); - move_entry(file_ptr, 0, 20); + move_entry(file_ptr, 0, 20); + move_entry(file_ptr, 0, 20); - if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) { + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); } - if ( nerrors == 0 ) { - + if (nerrors == 0) { config.version = H5AC__CURR_CACHE_CONFIG_VERSION; - if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { - - nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); - - } else { - - config.open_trace_file = 
FALSE; - config.close_trace_file = TRUE; - config.trace_file_name[0] = '\0'; - - if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config) - != SUCCEED ) { + if (H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { + nerrors++; + HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, + __func__); + } + else { + config.open_trace_file = FALSE; + config.close_trace_file = TRUE; + config.trace_file_name[0] = '\0'; - nerrors++; - HDfprintf(stdout, - "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", - world_mpi_rank, fcn_name); + if (H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) { + nerrors++; + HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, + __func__); } } - } + } /* end if */ - if ( fid >= 0 ) { - - if ( ! take_down_cache(fid) ) { + if (fid >= 0) { + if (!take_down_cache(fid, cache_ptr)) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); } - } + } /* end if */ /* verify that all instance of datum are back where the started * and are clean. */ - for ( i = 0; i < NUM_DATA_ENTRIES; i++ ) - { - HDassert( data_index[i] == i ); - HDassert( ! (data[i].dirty) ); + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); } /* compose the done message */ @@ -6957,210 +6477,473 @@ trace_file_check(int metadata_write_strategy) mssg.src = world_mpi_rank; mssg.dest = world_server_mpi_rank; mssg.mssg_num = -1; /* set by send function */ - mssg.base_addr = 0; /* not used */ - mssg.len = 0; /* not used */ - mssg.ver = 0; /* not used */ - mssg.count = 0; /* not used */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ mssg.magic = MSSG_MAGIC; - if ( success ) { - + if (success) { success = send_mssg(&mssg, FALSE); - if ( ! 
success ) { + if (!success) { + nerrors++; + if (verbose) + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); + } + } /* end if */ + + if (nerrors == 0) { + HDsnprintf(trace_file_name, sizeof(trace_file_name), "t_cache_trace.txt.%d", (int)file_mpi_rank); + + if ((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", - world_mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, __func__); + } + } /* end if */ + + i = 0; + while ((nerrors == 0) && (!done)) { + /* Get lines of actual and expected data */ + if ((*expected_output)[i] == NULL) + expected_line_len = (size_t)0; + else + expected_line_len = HDstrlen((*expected_output)[i]); + + if (HDfgets(buffer, 255, trace_file_ptr) != NULL) + actual_line_len = HDstrlen(buffer); + else + actual_line_len = (size_t)0; + + /* Compare the lines */ + /* Handle running out of data */ + if ((actual_line_len == 0) || (expected_line_len == 0)) { + if ((actual_line_len == 0) && (expected_line_len == 0)) { + /* Both ran out at the same time - we're done */ + done = TRUE; } + else { + /* One ran out before the other - BADNESS */ + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, + __func__, i); + if (expected_line_len == 0) { + HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__, + "<EMPTY>", expected_line_len); + HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__, + buffer, actual_line_len); + } + if (actual_line_len == 0) { + HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__, + (*expected_output)[i], expected_line_len); + HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__, + "<EMPTY>", actual_line_len); + } + } + HDfprintf(stdout, "BADNESS BADNESS BADNESS\n"); + } + } + /* We directly compare the header line (line 0) */ + else if (0 == i) { + if ((actual_line_len != expected_line_len) || + (HDstrcmp(buffer, (*expected_output)[i]) != 0)) { + + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, + __func__, i); + HDfprintf(stdout, "%d:%s: expected = \"%s\" %zu\n", world_mpi_rank, __func__, + (*expected_output)[i], expected_line_len); + HDfprintf(stdout, "%d:%s: actual = \"%s\" %zu\n", world_mpi_rank, __func__, buffer, + actual_line_len); + } + } + } + /* All other lines we tokenize and just compare the function name. This + * keeps the test from being too fragile. 
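[Editor's note] The tokenize-and-compare step described above reduces each post-header trace line to its leading function-name token before matching, so changes to argument formatting cannot break the test (only line 0, the header, is compared verbatim). A sketch of that comparison in isolation; `HDstrtok` and `HDstrcmp` are HDF5's thin wrappers over the C library calls:

/* Returns nonzero when the first whitespace-delimited token of the actual
 * trace line equals the expected H5AC call name. Note that HDstrtok()
 * modifies the buffer in place, which is also how the test uses it. */
static int
trace_line_matches(char *actual, const char *expected)
{
    char *tok = HDstrtok(actual, " ");

    return (tok != NULL) && (HDstrcmp(tok, expected) == 0);
}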
+ */ + else { + char *tok = NULL; /* token for actual line */ + + tok = HDstrtok(buffer, " "); + + if (HDstrcmp(tok, (*expected_output)[i]) != 0) { + + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, + __func__, i); + HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, __func__, + (*expected_output)[i]); + HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, __func__, tok); + } + } + } /* end else */ + + i++; + } /* end while */ + + /* Clean up the trace file */ + if (trace_file_ptr != NULL) { + HDfclose(trace_file_ptr); + trace_file_ptr = NULL; + HDremove(trace_file_name); + } + } /* end giant else that runs clients */ + + max_nerrors = get_max_nerrors(); + + if (world_mpi_rank == 0) { + + if (max_nerrors == 0) { + PASSED(); + } + else { + failures++; + H5_FAILED(); + } + } + + success = ((success) && (max_nerrors == 0)); + + return (success); + +} /* trace_file_check() */ + +/***************************************************************************** + * + * Function: smoke_check_6() + * + * Purpose: Sixth smoke check for the parallel cache. + * + * Return: Success: TRUE + * + * Failure: FALSE + * + * Programmer: JRM -- 1/13/06 + * + *****************************************************************************/ +static hbool_t +smoke_check_6(int metadata_write_strategy) +{ + H5P_coll_md_read_flag_t md_reads_file_flag; + hbool_t md_reads_context_flag; + hbool_t success = TRUE; + int i; + int max_nerrors; + hid_t fid = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + struct mssg_t mssg; + + switch (metadata_write_strategy) { + + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + if (world_mpi_rank == 0) { + TESTING("smoke check #6 -- process 0 only md write strategy"); + } + break; + + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + if (world_mpi_rank == 0) { + TESTING("smoke check #6 -- distributed md write strategy"); + } + break; + + default: + if (world_mpi_rank == 0) { + TESTING("smoke check #6 -- unknown md write strategy"); + } + break; + } + + nerrors = 0; + init_data(); + reset_stats(); + + if (world_mpi_rank == world_server_mpi_rank) { + + if (!server_main()) { + + /* some error occurred in the server -- report failure */ + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, __func__); } } + } + else /* run the clients */ + { + int temp; - if ( nerrors == 0 ) { + if (!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy)) { - sprintf(trace_file_name, "t_cache_trace.txt.%d", - (int)file_mpi_rank); + nerrors++; + fid = -1; + cache_ptr = NULL; + if (verbose) { + HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, __func__); + } + } + + temp = virt_num_data_entries; + virt_num_data_entries = NUM_DATA_ENTRIES; - if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) { + /* insert the first half collectively */ + md_reads_file_flag = H5P_USER_TRUE; + md_reads_context_flag = TRUE; + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); + for (i = 0; i < virt_num_data_entries / 2; i++) { + struct datum *entry_ptr; + entry_ptr = &(data[i]); + insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); + + if (TRUE != entry_ptr->header.coll_access) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: HDfopen failed.\n", - world_mpi_rank, fcn_name); + if (verbose) { + HDfprintf(stdout, "%d:%s: Entry inserted not marked as collective.\n", world_mpi_rank, + 
__func__); } } - } - i = 0; - while ( ( nerrors == 0 ) && ( ! done ) ) - { - if ( (*expected_output)[i] == NULL ) { + /* Make sure coll entries do not cross the 80% threshold */ + H5_CHECK_OVERFLOW(cache_ptr->max_cache_size, size_t, double); + HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size); + } + /* Restore collective metadata reads state */ + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); - expected_line_len = 0; + /* insert the other half independently */ + md_reads_file_flag = H5P_USER_FALSE; + md_reads_context_flag = FALSE; + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); + for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) { + struct datum *entry_ptr; + entry_ptr = &(data[i]); - } else { + insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET); - expected_line_len = HDstrlen((*expected_output)[i]); - } + if (FALSE != entry_ptr->header.coll_access) { + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n", + world_mpi_rank, __func__); + } + } - if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) { + /* Make sure coll entries do not cross the 80% threshold */ + HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size); + } + /* Restore collective metadata reads state */ + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); - actual_line_len = strlen(buffer); + /* flush the file */ + if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) { + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, __func__); + } + } - } else { + /* Protect the first half of the entries collectively */ + md_reads_file_flag = H5P_USER_TRUE; + md_reads_context_flag = TRUE; + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); + for (i = 0; i < (virt_num_data_entries / 2); i++) { + struct datum *entry_ptr; + entry_ptr = &(data[i]); - actual_line_len = 0; - } + lock_entry(file_ptr, i); - if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) { + if (TRUE != entry_ptr->header.coll_access) { + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: Entry protected not marked as collective.\n", world_mpi_rank, + __func__); + } + } + + /* Make sure coll entries do not cross the 80% threshold */ + HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size); + } + /* Restore collective metadata reads state */ + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); - done = TRUE; + /* protect the other half independently */ + md_reads_file_flag = H5P_USER_FALSE; + md_reads_context_flag = FALSE; + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); + for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) { + struct datum *entry_ptr; + entry_ptr = &(data[i]); - } else if ( ( actual_line_len != expected_line_len ) || - ( HDstrcmp(buffer, (*expected_output)[i]) != 0 ) ) { + lock_entry(file_ptr, i); - nerrors++; - if ( verbose ) { - HDfprintf(stdout, - "%d:%s: Unexpected data in trace file line %d.\n", - world_mpi_rank, fcn_name, i); - HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", - world_mpi_rank, fcn_name, (*expected_output)[i], - expected_line_len); - HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", - world_mpi_rank, fcn_name, buffer, - actual_line_len); + if (FALSE != entry_ptr->header.coll_access) { + nerrors++; + if (verbose) { + 
HDfprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n", + world_mpi_rank, __func__); } - } else { - i++; - } - } + } - if ( trace_file_ptr != NULL ) { + /* Make sure coll entries do not cross the 80% threshold */ + HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size); + } + /* Restore collective metadata reads state */ + H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag); - HDfclose(trace_file_ptr); - trace_file_ptr = NULL; -#if 1 - HDremove(trace_file_name); -#endif + for (i = 0; i < (virt_num_data_entries); i++) { + unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET); } - } - max_nerrors = get_max_nerrors(); + if (fid >= 0) { + + if (!take_down_cache(fid, cache_ptr)) { + + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, __func__); + } + } + } - if ( world_mpi_rank == 0 ) { + /* verify that all instances of datum are back where the started + * and are clean. + */ - if ( max_nerrors == 0 ) { + for (i = 0; i < NUM_DATA_ENTRIES; i++) { + HDassert(data_index[i] == i); + HDassert(!(data[i].dirty)); + } - PASSED(); + /* compose the done message */ + mssg.req = DONE_REQ_CODE; + mssg.src = world_mpi_rank; + mssg.dest = world_server_mpi_rank; + mssg.mssg_num = -1; /* set by send function */ + mssg.base_addr = 0; /* not used */ + mssg.len = 0; /* not used */ + mssg.ver = 0; /* not used */ + mssg.count = 0; /* not used */ + mssg.magic = MSSG_MAGIC; - } else { + if (success) { - failures++; - H5_FAILED(); + success = send_mssg(&mssg, FALSE); + + if (!success) { + + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, __func__); + } + } } + virt_num_data_entries = temp; } - success = ( ( success ) && ( max_nerrors == 0 ) ); + max_nerrors = get_max_nerrors(); -#else /* H5_METADATA_TRACE_FILE */ + if (world_mpi_rank == 0) { - if ( world_mpi_rank == 0 ) { + if (max_nerrors == 0) { - SKIPPED(); + PASSED(); + } + else { - HDfprintf(stdout, " trace file support disabled.\n"); + failures++; + H5_FAILED(); + } } -#endif /* H5_METADATA_TRACE_FILE */ - - return(success); + success = ((success) && (max_nerrors == 0)); -} /* trace_file_check() */ + return (success); +} /* smoke_check_6() */ /***************************************************************************** * - * Function: main() - * - * Purpose: Main function for the parallel cache test. + * Function: main() * - * Return: Success: 0 + * Purpose: Main function for the parallel cache test. * - * Failure: 1 + * Return: Success: 0 * - * Programmer: JRM -- 12/23/05 - * - * Modifications: + * Failure: 1 * - * None. 
+ * Programmer: JRM -- 12/23/05 * *****************************************************************************/ - int main(int argc, char **argv) { - const char * fcn_name = "main()"; - int express_test; + int express_test; unsigned u; - int mpi_size; - int mpi_rank; - int max_nerrors; + int mpi_size; + int mpi_rank; + int max_nerrors; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - world_mpi_size = mpi_size; - world_mpi_rank = mpi_rank; + world_mpi_size = mpi_size; + world_mpi_rank = mpi_rank; world_server_mpi_rank = mpi_size - 1; - world_mpi_comm = MPI_COMM_WORLD; + world_mpi_comm = MPI_COMM_WORLD; /* Attempt to turn off atexit post processing so that in case errors * happen during the test and the process is aborted, it will not get * hang in the atexit post processing in which it may try to make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0){ - printf("Failed to turn off atexit processing. Continue.\n"); - }; + if (H5dont_atexit() < 0) + HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank); + H5open(); express_test = do_express_test(); -#if 0 /* JRM */ - express_test = 0; -#endif /* JRM */ - if ( express_test ) { - - virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES; - - } else { - - virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES; + if (express_test) + virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES; + else + virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES; + + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("Parallel metadata cache tests\n"); + HDprintf(" mpi_size = %d\n", mpi_size); + HDprintf(" express_test = %d\n", express_test); + HDprintf("===================================\n"); + } + + if (mpi_size < 3) { + if (MAINPROCESS) + HDprintf(" Need at least 3 processes. Exiting.\n"); + goto finish; } -#ifdef H5_HAVE_MPE - if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); } - virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES; -#endif /* H5_HAVE_MPE */ - - - if (MAINPROCESS){ - printf("===================================\n"); - printf("Parallel metadata cache tests\n"); - printf(" mpi_size = %d\n", mpi_size); - printf(" express_test = %d\n", express_test); - printf("===================================\n"); + if (NULL == (data = HDmalloc(NUM_DATA_ENTRIES * sizeof(*data)))) { + HDprintf(" Couldn't allocate data array. Exiting.\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + if (NULL == (data_index = HDmalloc(NUM_DATA_ENTRIES * sizeof(*data_index)))) { + HDprintf(" Couldn't allocate data index array. Exiting.\n"); + MPI_Abort(MPI_COMM_WORLD, -1); } - if ( mpi_size < 3 ) { - - if ( MAINPROCESS ) { - - printf(" Need at least 3 processes. 
Exiting.\n"); + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); } - goto finish; } set_up_file_communicator(); @@ -7175,82 +6958,61 @@ main(int argc, char **argv) */ /* setup file access property list with the world communicator */ - if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) { + if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n", world_mpi_rank, __func__); } - if ( H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0 ) { - + if (H5Pset_fapl_mpio(fapl, world_mpi_comm, MPI_INFO_NULL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 1.\n", world_mpi_rank, __func__); } /* fix the file names */ - for ( u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u ) - { - if ( h5_fixname(FILENAME[u], fapl, filenames[u], - sizeof(filenames[u])) == NULL ) { - + for (u = 0; u < sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; ++u) { + if (h5_fixname(FILENAME[u], fapl, filenames[u], PATH_MAX) == NULL) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: h5_fixname() failed.\n", world_mpi_rank, __func__); break; } } /* close the fapl before we set it up again */ - if ( H5Pclose(fapl) < 0 ) { + if (H5Pclose(fapl) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n", world_mpi_rank, __func__); } /* now create the fapl again, excluding the server process. */ - if ( world_mpi_rank != world_server_mpi_rank ) { + if (world_mpi_rank != world_server_mpi_rank) { /* setup file access property list */ - if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) { - nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", - world_mpi_rank, fcn_name); - } + if (FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS))) { + nerrors++; + if (verbose) + HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n", world_mpi_rank, __func__); } - if ( H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0 ) { - + if (H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0) { nerrors++; - if ( verbose ) { - HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", - world_mpi_rank, fcn_name); - } + if (verbose) + HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n", world_mpi_rank, __func__); } } setup_rand(); max_nerrors = get_max_nerrors(); - - if ( max_nerrors != 0 ) { + if (max_nerrors != 0) { /* errors in setup -- no point in continuing */ - - if ( world_mpi_rank == 0 ) { - + if (world_mpi_rank == 0) HDfprintf(stdout, "Errors in test initialization. 
Exiting.\n"); - } - goto finish; + goto finish; } /* run the tests */ @@ -7277,26 +7039,50 @@ main(int argc, char **argv) smoke_check_5(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); smoke_check_5(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); #endif + /* enable the collective metadata read property */ + if (world_mpi_rank != world_server_mpi_rank) { + if (H5Pset_all_coll_metadata_ops(fapl, TRUE) < 0) { + + nerrors++; + if (verbose) { + HDfprintf(stdout, "%d:%s: H5Pset_all_coll_metadata_ops() failed 1.\n", world_mpi_rank, + __func__); + } + } + } +#if 1 + smoke_check_6(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); + smoke_check_6(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); +#endif + #if 1 trace_file_check(H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); trace_file_check(H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); #endif finish: + if (data_index) + HDfree(data_index); + if (data) + HDfree(data); + /* make sure all processes are finished before final report, cleanup * and exit. */ + + if (file_mpi_comm != MPI_COMM_NULL) + MPI_Comm_free(&file_mpi_comm); + MPI_Barrier(MPI_COMM_WORLD); - if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); - if (failures){ - printf("***metadata cache tests detected %d failures***\n", - failures); - } - else{ - printf("metadata cache tests finished with no failures\n"); - } - printf("===================================\n"); + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors || failures) { + HDprintf("***metadata cache tests detected %d failures***\n", nerrors + failures); + } + else { + HDprintf("metadata cache tests finished with no failures\n"); + } + HDprintf("===================================\n"); } takedown_derived_types(); @@ -7308,6 +7094,5 @@ finish: MPI_Finalize(); /* cannot just return (failures) because exit code is limited to 1byte */ - return(failures != 0); + return (nerrors != 0 || failures != 0); } - diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c new file mode 100644 index 0000000..65c892d --- /dev/null +++ b/testpar/t_cache_image.c @@ -0,0 +1,3717 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: John Mainzer + * 7/13/15 + * + * This file contains tests specific to the cache image + * feature implemented in H5C.c + */ +#include "testphdf5.h" + +#include "cache_common.h" +#include "genall5.h" + +#define TEST_FILES_TO_CONSTRUCT 2 +#define CHUNK_SIZE 10 +#define DSET_SIZE (40 * CHUNK_SIZE) +#define MAX_NUM_DSETS 256 +#define PAR_NUM_DSETS 32 +#define PAGE_SIZE (4 * 1024) +#define PB_SIZE (64 * PAGE_SIZE) + +/* global variable declarations: */ + +const char *FILENAMES[] = {"t_cache_image_00", "t_cache_image_01", "t_cache_image_02", NULL}; + +/* local utility function declarations */ + +static void create_data_sets(hid_t file_id, int min_dset, int max_dset); +#if 0 /* keep pending full parallel cache image */ +static void delete_data_sets(hid_t file_id, int min_dset, int max_dset); +#endif + +static void open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, + const hbool_t read_only, const hbool_t set_mdci_fapl, const hbool_t config_fsm, + const hbool_t enable_page_buffer, const char *hdf_file_name, + const unsigned cache_image_flags, hid_t *file_id_ptr, H5F_t **file_ptr_ptr, + H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type, + const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write, + const int md_write_strat); + +static void verify_data_sets(hid_t file_id, int min_dset, int max_dset); + +/* local test function declarations */ + +static unsigned construct_test_file(int test_file_index); +static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size); +static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank); +static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank); + +static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size); +static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size); + +/* top level test function declarations */ +static unsigned verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank); +static unsigned verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank); + +static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size); + +/****************************************************************************/ +/***************************** Utility Functions ****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: construct_test_file() + * + * Purpose: This function attempts to mimic the typical "poor man's + * parallel use case in which the file is passed between + * processes, each of which open the file, write some data, + * close the file, and then pass control on to the next + * process. + * + * In this case, we create one group for each process, and + * populate it with a "zoo" of HDF5 objects selected to + * (ideally) exercise all HDF5 on disk data structures. + * + * The end result is a test file used verify that PHDF5 + * can open a file with a cache image. + * + * Cycle of operation + * + * 1) Create a HDF5 file with the cache image FAPL entry. + * + * Verify that the cache is informed of the cache image + * FAPL entry. + * + * Set all cache image flags, forcing full functionality. + * + * 2) Create a data set in the file. + * + * 3) Close the file. + * + * 4) Open the file. 
+ * + * Verify that the metadata cache is instructed to load + * the metadata cache image. + * + * 5) Create a data set in the file. + * + * 6) Close the file. If enough datasets have been created + * goto 7. Otherwise return to 4. + * + * 7) Open the file R/O. + * + * Verify that the file contains a metadata cache image + * superblock extension message. + * + * 8) Verify all data sets. + * + * Verify that the cache image has been loaded. + * + * 9) close the file. + * + * Return: void + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +construct_test_file(int test_file_index) +{ + const char *fcn_name = "construct_test_file()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + int min_dset = 0; + int max_dset = 0; + MPI_Comm dummy_comm = MPI_COMM_WORLD; + MPI_Info dummy_info = MPI_INFO_NULL; + + pass = TRUE; + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the file name */ + if (pass) { + + HDassert(FILENAMES[test_file_index]); + + if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Create a HDF5 file with the cache image FAPL entry. + * + * Verify that the cache is informed of the cache image FAPL entry. + * + * Set flags forcing full function of the cache image feature. + */ + + if (pass) { + + open_hdf5_file(/* create_file */ TRUE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ TRUE, + /* config_fsm */ TRUE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Create a data set in the file. */ + + if (pass) { + + create_data_sets(file_id, min_dset++, max_dset++); + } + +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (cache_ptr->images_loaded != 0) { + + pass = FALSE; + failure_mssg = "metadata cache image block loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Close the file. */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + while ((pass) && (max_dset < MAX_NUM_DSETS)) { + + /* 4) Open the file. + * + * Verify that the metadata cache is instructed to load the + * metadata cache image. 
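[Editor's note] Steps 1 and 4 of this cycle rely on open_hdf5_file() (declared earlier in the file) to put the cache-image entry on the FAPL via its set_mdci_fapl and cache_image_flags parameters. Through the public API that boils down to H5Pset_mdc_image_config(); a minimal sketch under the "full functionality" settings these comments call for — the struct members and constants are the real H5AC_cache_image_config_t API, while the chosen field values are illustrative assumptions:

/* Build a file-access property list that requests a metadata cache image
 * be written when the file is closed. Returns the FAPL id, or -1 on error. */
static hid_t
fapl_with_cache_image(void)
{
    H5AC_cache_image_config_t config;
    hid_t                     fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl_id < 0)
        return -1;

    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = TRUE;  /* write an image on file close */
    config.save_resize_status = FALSE;
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if (H5Pset_mdc_image_config(fapl_id, &config) < 0) {
        H5Pclose(fapl_id);
        return -1;
    }

    return fapl_id;
}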
+ */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ FALSE, + /* set_mdci_fapl */ TRUE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if (show_progress) + HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp, max_dset, pass); + + /* 5) Create a data set in the file. */ + + if (pass) { + + create_data_sets(file_id, min_dset++, max_dset++); + } + +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (cache_ptr->images_loaded == 0) { + + pass = FALSE; + failure_mssg = "metadata cache image block not loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (show_progress) + HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp + 1, max_dset, pass); + + /* 6) Close the file. */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n", fcn_name, cp + 2, max_dset, pass); + } /* end while */ + cp += 3; + + /* 7) Open the file R/O. + * + * Verify that the file contains a metadata cache image + * superblock extension message. + */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ TRUE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 0); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Open and close all data sets. + * + * Verify that the cache image has been loaded. + */ + + if (pass) { + + verify_data_sets(file_id, 0, max_dset - 1); + } + +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (cache_ptr->images_loaded == 0) { + + pass = FALSE; + failure_mssg = "metadata cache image block not loaded(2)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 9) Close the file. */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + return !pass; + +} /* construct_test_file() */ + +/*------------------------------------------------------------------------- + * Function: create_data_sets() + * + * Purpose: If pass is TRUE on entry, create the specified data sets + * in the indicated file. + * + * Data sets and their contents must be well know, as we + * will verify that they contain the expected data later. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 7/15/15 + * + * Modifications: + * + * Added min_dset and max_dset parameters and supporting + * code. 
This allows the caller to specify a range of + * datasets to create. + * JRM -- 8/20/15 + * + *------------------------------------------------------------------------- + */ + +static void +create_data_sets(hid_t file_id, int min_dset, int max_dset) +{ + const char *fcn_name = "create_data_sets()"; + char dset_name[64]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l, m; + int data_chunk[CHUNK_SIZE][CHUNK_SIZE]; + herr_t status; + hid_t dataspace_id = -1; + hid_t filespace_ids[MAX_NUM_DSETS]; + hid_t memspace_id = -1; + hid_t dataset_ids[MAX_NUM_DSETS]; + hid_t properties = -1; + hsize_t dims[2]; + hsize_t a_size[2]; + hsize_t offset[2]; + hsize_t chunk_size[2]; + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + HDassert(0 <= min_dset); + HDassert(min_dset <= max_dset); + HDassert(max_dset < MAX_NUM_DSETS); + + /* create the datasets */ + + if (pass) { + + i = min_dset; + + while ((pass) && (i <= max_dset)) { + /* create a dataspace for the chunked dataset */ + dims[0] = DSET_SIZE; + dims[1] = DSET_SIZE; + dataspace_id = H5Screate_simple(2, dims, NULL); + + if (dataspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + + /* set the dataset creation plist to specify that the raw data is + * to be partitioned into 10X10 element chunks. + */ + + if (pass) { + + chunk_size[0] = CHUNK_SIZE; + chunk_size[1] = CHUNK_SIZE; + properties = H5Pcreate(H5P_DATASET_CREATE); + + if (properties < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate() failed."; + } + } + + if (pass) { + + if (H5Pset_chunk(properties, 2, chunk_size) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_chunk() failed."; + } + } + + /* create the dataset */ + if (pass) { + + HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i); + dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, + properties, H5P_DEFAULT); + + if (dataset_ids[i] < 0) { + + pass = FALSE; + failure_mssg = "H5Dcreate() failed."; + } + } + + /* get the file space ID */ + if (pass) { + + filespace_ids[i] = H5Dget_space(dataset_ids[i]); + + if (filespace_ids[i] < 0) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + i++; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* create the mem space to be used to read and write chunks */ + if (pass) { + + dims[0] = CHUNK_SIZE; + dims[1] = CHUNK_SIZE; + memspace_id = H5Screate_simple(2, dims, NULL); + + if (memspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* select in memory hyperslab */ + if (pass) { + + offset[0] = 0; /*offset of hyperslab in memory*/ + offset[1] = 0; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* initialize all datasets on a round robin basis */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + m = min_dset; + while ((pass) && (m <= max_dset)) { + /* initialize the slab */ + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + 
k)) + j + l; + } + } + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /*offset of hyperslab in file*/ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk H5Sselect_hyperslab() failed."; + } + + /* write the chunk to file */ + status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m], H5P_DEFAULT, + data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Dwrite() failed."; + } + m++; + } + j += CHUNK_SIZE; + } + + i += CHUNK_SIZE; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* read data from data sets and validate it */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + m = min_dset; + while ((pass) && (m <= max_dset)) { + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /* offset of hyperslab in file */ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if (pass) { + + status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m], + H5P_DEFAULT, data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + } + + /* validate the slab */ + if (pass) { + + valid_chunk = TRUE; + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + if (data_chunk[k][l] != + ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) { + + valid_chunk = FALSE; + + if (verbose) { + + HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l, + data_chunk[k][l], + ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)); + HDfprintf(stdout, "m = %d, i = %d, j = %d, k = %d, l = %d\n", m, i, j, k, + l); + } + } + } + } + + if (!valid_chunk) { + + pass = FALSE; + failure_mssg = "slab validation failed."; + + if (verbose) { + + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m); + } + } + } + m++; + } + j += CHUNK_SIZE; + } + i += CHUNK_SIZE; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* close the file spaces */ + i = min_dset; + while ((pass) && (i <= max_dset)) { + if (H5Sclose(filespace_ids[i]) < 0) { + + pass = FALSE; + failure_mssg = "H5Sclose() failed."; + } + i++; + } + + /* close the datasets */ + i = min_dset; + while ((pass) && (i <= max_dset)) { + if (H5Dclose(dataset_ids[i]) < 0) { + + pass = FALSE; + failure_mssg = "H5Dclose() failed."; + } + i++; + } + + /* close the mem space */ + if (pass) { + + if (H5Sclose(memspace_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Sclose(memspace_id) failed."; + } + } + + return; + +} /* create_data_sets() */ + +/*------------------------------------------------------------------------- + * Function: delete_data_sets() + * + * Purpose: If pass is TRUE on entry, verify and then delete the + * dataset(s) indicated by min_dset and max_dset in the + * indicated file. + * + * Data sets and their contents must be well know, as we + * will verify that they contain the expected data later. 
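+ *
+ *              For reference, create_data_sets() above stores the value
+ *
+ *                  (DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * x) + y
+ *
+ *              in element (x, y) of dataset m, so the expected contents
+ *              can be recomputed from the indices alone.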
+ *
+ *              On failure, set pass to FALSE, and set failure_mssg
+ *              to point to an appropriate failure message.
+ *
+ *              Do nothing if pass is FALSE on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              10/31/16
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+/* this code will be needed to test full support of cache image
+ * in parallel -- keep it around against that day.
+ *
+ *                                        -- JRM
+ */
+static void
+delete_data_sets(hid_t file_id, int min_dset, int max_dset)
+{
+    const char * fcn_name = "delete_data_sets()";
+    char dset_name[64];
+    hbool_t show_progress = FALSE;
+    int cp = 0;
+    int i;
+
+    if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    HDassert(0 <= min_dset);
+    HDassert(min_dset <= max_dset);
+    HDassert(max_dset < MAX_NUM_DSETS);
+
+    if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    /* first, verify the contents of the target dataset(s) */
+    verify_data_sets(file_id, min_dset, max_dset);
+
+    if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    /* now delete the target datasets */
+    if ( pass ) {
+
+        i = min_dset;
+
+        while ( ( pass ) && ( i <= max_dset ) )
+        {
+            HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
+
+            if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+                pass = FALSE;
+                failure_mssg = "H5Ldelete() failed.";
+            }
+
+            i++;
+        }
+    }
+
+    if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    return;
+
+} /* delete_data_sets() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function:    open_hdf5_file()
+ *
+ * Purpose:     If pass is TRUE on entry, create or open the specified
+ *              HDF5 file and test to see if it has a metadata cache
+ *              image superblock extension message.
+ *
+ *              Set pass to FALSE and issue a suitable failure
+ *              message if either the file contains a metadata cache image
+ *              superblock extension and mdci_sbem_expected is FALSE, or
+ *              vice versa.
+ *
+ *              If mdci_sbem_expected is TRUE, also verify that the metadata
+ *              cache has been advised of this.
+ *
+ *              If read_only is TRUE, open the file read only.  Otherwise
+ *              open the file read/write.
+ *
+ *              If set_mdci_fapl is TRUE, set the metadata cache image
+ *              FAPL entry when opening the file, and verify that the
+ *              metadata cache is notified.
+ *
+ *              If config_fsm is TRUE, setup the persistent free space
+ *              manager.  Note that this flag may only be set if
+ *              create_file is also TRUE.
+ *
+ *              Return pointers to the cache data structure and file data
+ *              structures.
+ *
+ *              On failure, set pass to FALSE, and set failure_mssg
+ *              to point to an appropriate failure message.
+ *
+ *              Do nothing if pass is FALSE on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              7/14/15
+ *
+ * Modifications:
+ *
+ *              Modified function to handle parallel file creates / opens.
+ *
+ *                                                 JRM -- 2/1/17
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+open_hdf5_file(const hbool_t create_file, const hbool_t mdci_sbem_expected, const hbool_t read_only,
+               const hbool_t set_mdci_fapl, const hbool_t config_fsm, const hbool_t enable_page_buffer,
+               const char *hdf_file_name, const unsigned cache_image_flags, hid_t *file_id_ptr,
+               H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr, MPI_Comm comm, MPI_Info info, int l_facc_type,
+               const hbool_t all_coll_metadata_ops, const hbool_t coll_metadata_write,
+               const int md_write_strat)
+{
+    const char *fcn_name      = "open_hdf5_file()";
+    hbool_t     show_progress = FALSE;
+    hbool_t     verbose       = FALSE;
+    int         cp            = 0;
+    hid_t       fapl_id       = -1;
+    hid_t       fcpl_id       = -1;
+    hid_t       file_id       = -1;
+    herr_t      result;
+    H5F_t      *file_ptr  = NULL;
+    H5C_t      *cache_ptr = NULL;
+    H5C_cache_image_ctl_t     image_ctl;
+    H5AC_cache_image_config_t cache_image_config = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE,
+                                                    H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+
+    HDassert(!create_file || config_fsm);
+
+    if (pass) {
+        /* opening the file both read only and with a cache image
+         * requested is a contradiction.  We resolve it by ignoring
+         * the cache image request silently.
+         */
+        if ((create_file && mdci_sbem_expected) || (create_file && read_only) ||
+            (config_fsm && !create_file) || (create_file && enable_page_buffer && !config_fsm) ||
+            (hdf_file_name == NULL) || ((set_mdci_fapl) && (cache_image_flags == 0)) ||
+            ((set_mdci_fapl) && ((cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0)) || (file_id_ptr == NULL) ||
+            (file_ptr_ptr == NULL) || (cache_ptr_ptr == NULL) ||
+            (l_facc_type != (l_facc_type & (FACC_MPIO)))) {
+
+            failure_mssg = "Bad param(s) on entry to open_hdf5_file().\n";
+
+            pass = FALSE;
+        }
+        else if (verbose) {
+
+            HDfprintf(stdout, "%s: HDF file name = \"%s\".\n", fcn_name, hdf_file_name);
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* create a file access property list.
*/ + if (pass) { + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + + if (fapl_id < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* call H5Pset_libver_bounds() on the fapl_id */ + if (pass) { + + if (H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_libver_bounds() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* get metadata cache image config -- verify that it is the default */ + if (pass) { + + result = H5Pget_mdc_image_config(fapl_id, &cache_image_config); + + if (result < 0) { + + pass = FALSE; + failure_mssg = "H5Pget_mdc_image_config() failed.\n"; + } + + if ((cache_image_config.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) || + (cache_image_config.generate_image != FALSE) || + (cache_image_config.save_resize_status != FALSE) || + (cache_image_config.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE)) { + + pass = FALSE; + failure_mssg = "Unexpected default cache image config.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* set metadata cache image fapl entry if indicated */ + if ((pass) && (set_mdci_fapl)) { + + /* set cache image config fields to taste */ + cache_image_config.generate_image = TRUE; + cache_image_config.save_resize_status = FALSE; + cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE; + + result = H5Pset_mdc_image_config(fapl_id, &cache_image_config); + + if (result < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_mdc_image_config() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the persistent free space manager if indicated */ + if ((pass) && (config_fsm)) { + + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + + if (fcpl_id <= 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed."; + } + } + + if ((pass) && (config_fsm)) { + + if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) == FAIL) { + pass = FALSE; + failure_mssg = "H5Pset_file_space_strategy() failed.\n"; + } + } + + if ((pass) && (config_fsm)) { + + if (H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL) { + + pass = FALSE; + failure_mssg = "H5Pset_file_space_page_size() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the page buffer if indicated */ + if ((pass) && (enable_page_buffer)) { + + if (H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_page_buffer_size() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ((pass) && (l_facc_type == FACC_MPIO)) { + + /* set Parallel access with communicator */ + if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_fapl_mpio() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ((pass) && (l_facc_type == FACC_MPIO)) { + + if (H5Pset_all_coll_metadata_ops(fapl_id, all_coll_metadata_ops) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_all_coll_metadata_ops() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ((pass) 
&& (l_facc_type == FACC_MPIO)) {
+
+        if (H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Pset_coll_metadata_write() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    if ((pass) && (l_facc_type == FACC_MPIO)) {
+
+        /* set the desired parallel metadata write strategy */
+        H5AC_cache_config_t mdc_config;
+
+        mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
+
+        if (H5Pget_mdc_config(fapl_id, &mdc_config) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Pget_mdc_config() failed.\n";
+        }
+
+        mdc_config.metadata_write_strategy = md_write_strat;
+
+        if (H5Pset_mdc_config(fapl_id, &mdc_config) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Pset_mdc_config() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* open the file */
+    if (pass) {
+
+        if (create_file) {
+
+            if (fcpl_id != -1)
+
+                file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, fcpl_id, fapl_id);
+            else
+
+                file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+        }
+        else {
+
+            if (read_only)
+
+                file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id);
+
+            else
+
+                file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR, fapl_id);
+        }
+
+        if (file_id < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Fcreate() or H5Fopen() failed.\n";
+        }
+        else {
+
+            file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE);
+
+            if (file_ptr == NULL) {
+
+                pass         = FALSE;
+                failure_mssg = "Can't get file_ptr.";
+
+                if (verbose) {
+                    HDfprintf(stdout, "%s: Can't get file_ptr.\n", fcn_name);
+                }
+            }
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+     * to the cache structure
+     */
+    if (pass) {
+
+        if (file_ptr->shared->cache == NULL) {
+
+            pass         = FALSE;
+            failure_mssg = "can't get cache pointer(1).\n";
+        }
+        else {
+
+            cache_ptr = file_ptr->shared->cache;
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* verify expected page buffer status.  At present, page buffering
+     * must be disabled in parallel -- hopefully this will change in the
+     * future.
+     */
+    if (pass) {
+
+        if ((file_ptr->shared->page_buf) && ((!enable_page_buffer) || (l_facc_type == FACC_MPIO))) {
+
+            pass         = FALSE;
+            failure_mssg = "page buffer unexpectedly enabled.";
+        }
+        else if ((file_ptr->shared->page_buf == NULL) &&
+                 ((enable_page_buffer) && (l_facc_type != FACC_MPIO))) {
+
+            pass         = FALSE;
+            failure_mssg = "page buffer unexpectedly disabled.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* verify expected metadata cache status */
+
+    /* get the cache image control structure from the cache, and verify
+     * that it contains the expected values.
+     *
+     * Then set the flags in this structure to the specified value.
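+     *
+     * (For orientation: this internal control structure parallels the
+     * public FAPL-level cache image configuration.  A minimal sketch of
+     * how an application requests a cache image -- error checks omitted,
+     * fapl_id assumed to exist -- is:
+     *
+     *     H5AC_cache_image_config_t cfg =
+     *         {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+     *          TRUE,                                   -- generate_image
+     *          FALSE,                                  -- save_resize_status
+     *          H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+     *     H5Pset_mdc_image_config(fapl_id, &cfg);
+     *
+     * which is exactly what the set_mdci_fapl branch above does.)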
+ */ + if (pass) { + + if (H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0) { + + pass = FALSE; + failure_mssg = "error returned by H5C_get_cache_image_config()."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (set_mdci_fapl) { + + if (read_only) { + + if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) || + (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) || + (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) || + (image_ctl.flags != H5C_CI__ALL_FLAGS)) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(1).\n"; + } + } + else { + + if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) || + (image_ctl.generate_image != TRUE) || (image_ctl.save_resize_status != FALSE) || + (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) || + (image_ctl.flags != H5C_CI__ALL_FLAGS)) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(2).\n"; + } + } + } + else { + + if ((image_ctl.version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION) || + (image_ctl.generate_image != FALSE) || (image_ctl.save_resize_status != FALSE) || + (image_ctl.entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE) || + (image_ctl.flags != H5C_CI__ALL_FLAGS)) { + + pass = FALSE; + failure_mssg = "Unexpected image_ctl values(3).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if ((pass) && (set_mdci_fapl)) { + + image_ctl.flags = cache_image_flags; + + if (H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0) { + + pass = FALSE; + failure_mssg = "error returned by H5C_set_cache_image_config()."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (cache_ptr->close_warning_received == TRUE) { + + pass = FALSE; + failure_mssg = "Unexpected value of close_warning_received.\n"; + } + + if (mdci_sbem_expected) { + + if (read_only) { + + if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != FALSE)) { + + pass = FALSE; + failure_mssg = "mdci sb extension message not present?\n"; + } + } + else { + + if ((cache_ptr->load_image != TRUE) || (cache_ptr->delete_image != TRUE)) { + + pass = FALSE; + failure_mssg = "mdci sb extension message not present?\n"; + } + } + } + else { + + if ((cache_ptr->load_image == TRUE) || (cache_ptr->delete_image == TRUE)) { + + pass = FALSE; + failure_mssg = "mdci sb extension message present?\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + *file_id_ptr = file_id; + *file_ptr_ptr = file_ptr; + *cache_ptr_ptr = cache_ptr; + } + + if (show_progress) { + HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n", fcn_name, cp++, pass); + + if (!pass) + HDfprintf(stdout, "%s: failure_mssg = %s\n", fcn_name, failure_mssg); + } + + return; + +} /* open_hdf5_file() */ + +/*------------------------------------------------------------------------- + * Function: par_create_dataset() + * + * Purpose: Collectively create a chunked dataset, and fill it with + * known values. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 3/4/17 + * + * Modifications: + * + * None. 
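+ *
+ *              For orientation, the collective pattern exercised below
+ *              is roughly (sketch; property-list setup and error checks
+ *              omitted):
+ *
+ *                  dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
+ *                                       dataspace_id, H5P_DEFAULT,
+ *                                       dcpl_id, H5P_DEFAULT);
+ *                  H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ *                  H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id,
+ *                           filespace_id, dxpl_id, data_chunk);
+ *
+ *              Every rank makes the same H5Dcreate2() call, then writes
+ *              only plane [mpi_rank] of the (mpi_size x DSET_SIZE x
+ *              DSET_SIZE) dataset via its hyperslab selection.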
+ * + *------------------------------------------------------------------------- + */ + +static void +par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size) +{ + const char *fcn_name = "par_create_dataset()"; + char dset_name[256]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l; + int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE]; + hsize_t dims[3]; + hsize_t a_size[3]; + hsize_t offset[3]; + hsize_t chunk_size[3]; + hid_t status; + hid_t dataspace_id = -1; + hid_t memspace_id = -1; + hid_t dset_id = -1; + hid_t filespace_id = -1; + hid_t dcpl_id = -1; + hid_t dxpl_id = -1; + + show_progress = (show_progress && (mpi_rank == 0)); + verbose = (verbose && (mpi_rank == 0)); + + HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num); + + if (show_progress) { + HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name); + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + } + + if (pass) { + + /* create a dataspace for the chunked dataset */ + dims[0] = (hsize_t)mpi_size; + dims[1] = DSET_SIZE; + dims[2] = DSET_SIZE; + dataspace_id = H5Screate_simple(3, dims, NULL); + + if (dataspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* set the dataset creation plist to specify that the raw data is + * to be partitioned into 1X10X10 element chunks. + */ + + if (pass) { + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + + if (dcpl_id < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_DATASET_CREATE) failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + chunk_size[0] = 1; + chunk_size[1] = CHUNK_SIZE; + chunk_size[2] = CHUNK_SIZE; + + if (H5Pset_chunk(dcpl_id, 3, chunk_size) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_chunk() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* create the dataset */ + if (pass) { + + dset_id = + H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + + if (dset_id < 0) { + + pass = FALSE; + failure_mssg = "H5Dcreate() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* get the file space ID */ + if (pass) { + + filespace_id = H5Dget_space(dset_id); + + if (filespace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* create the mem space to be used to read and write chunks */ + if (pass) { + + dims[0] = 1; + dims[1] = CHUNK_SIZE; + dims[2] = CHUNK_SIZE; + memspace_id = H5Screate_simple(3, dims, NULL); + + if (memspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* select in memory hyperslab */ + if (pass) { + + offset[0] = 0; /* offset of hyperslab in memory */ + offset[1] = 0; + offset[2] = 0; + a_size[0] = 1; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + a_size[2] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if (show_progress) + 
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the DXPL for collective I/O */ + if (pass) { + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + + if (dxpl_id < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_dxpl_mpio() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* initialize the dataset with collective writes */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n", fcn_name, cp, pass); + + /* initialize the slab */ + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + data_chunk[0][k][l] = + (DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n", fcn_name, cp, pass); + + /* select on disk hyperslab */ + offset[0] = (hsize_t)mpi_rank; /* offset of hyperslab in file */ + offset[1] = (hsize_t)i; + offset[2] = (hsize_t)j; + a_size[0] = (hsize_t)1; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + a_size[2] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk H5Sselect_hyperslab() failed."; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n", fcn_name, cp, pass); + + /* write the chunk to file */ + status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Dwrite() failed."; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n", fcn_name, cp, pass); + + j += CHUNK_SIZE; + } + + i += CHUNK_SIZE; + } + + cp++; + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* read data from data sets and validate it */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + /* select on disk hyperslab */ + offset[0] = (hsize_t)mpi_rank; + offset[1] = (hsize_t)i; /* offset of hyperslab in file */ + offset[2] = (hsize_t)j; + a_size[0] = (hsize_t)1; + a_size[1] = CHUNK_SIZE; /* size of hyperslab */ + a_size[2] = CHUNK_SIZE; + + status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if (pass) { + + status = H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "chunk read failed."; + } + } + + /* validate the slab */ + if (pass) { + + valid_chunk = TRUE; + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + if (data_chunk[0][k][l] != + ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) { + + valid_chunk = FALSE; + + if (verbose) { + + HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l, + data_chunk[0][k][l], + ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + + l + dset_num)); + HDfprintf(stdout, "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n", dset_num, + 
i, j, k, l);
+                            }
+                        }
+                    }
+                }
+
+                if (!valid_chunk) {
+
+                    pass         = FALSE;
+                    failure_mssg = "slab validation failed.";
+
+                    if (verbose) {
+
+                        HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num);
+                    }
+                }
+            }
+            j += CHUNK_SIZE;
+        }
+        i += CHUNK_SIZE;
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* close the data space */
+    if ((pass) && (H5Sclose(dataspace_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Sclose(dataspace_id) failed.";
+    }
+
+    /* close the file space */
+    if ((pass) && (H5Sclose(filespace_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Sclose(filespace_id) failed.";
+    }
+
+    /* close the dataset */
+    if ((pass) && (H5Dclose(dset_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Dclose(dset_id) failed.";
+    }
+
+    /* close the mem space */
+    if ((pass) && (H5Sclose(memspace_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Sclose(memspace_id) failed.";
+    }
+
+    /* close the dataset creation property list */
+    if ((pass) && (H5Pclose(dcpl_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Pclose(dcpl) failed.";
+    }
+
+    /* close the data access property list */
+    if ((pass) && (H5Pclose(dxpl_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Pclose(dxpl) failed.";
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    return;
+
+} /* par_create_dataset() */
+
+/*-------------------------------------------------------------------------
+ * Function:    par_delete_dataset()
+ *
+ * Purpose:     Collectively delete the specified dataset.
+ *
+ *              On failure, set pass to FALSE, and set failure_mssg
+ *              to point to an appropriate failure message.
+ *
+ *              Do nothing if pass is FALSE on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              3/6/17
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
+{
+    const char *fcn_name = "par_delete_dataset()";
+    char        dset_name[256];
+    hbool_t     show_progress = FALSE;
+    int         cp            = 0;
+
+    show_progress = (show_progress && (mpi_rank == 0));
+
+    HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+
+    if (show_progress) {
+        HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+    }
+
+    /* verify the target dataset */
+    if (pass) {
+
+        par_verify_dataset(dset_num, file_id, mpi_rank);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* delete the target dataset */
+    if (pass) {
+
+        if (H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Ldelete() failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    return;
+
+} /* par_delete_dataset() */
+
+/*-------------------------------------------------------------------------
+ * Function:    par_insert_cache_image()
+ *
+ * Purpose:     Insert a cache image in the supplied file.
+ *
+ *              At present, cache image is not enabled in the parallel
+ *              version of the library, so the cache image must be
+ *              inserted serially: process 0 performs the insertion
+ *              alone, and all processes wait at a barrier until the
+ *              insertion is complete, then return.
+ *
+ *              On failure, set pass to FALSE, and set failure_mssg
+ *              to point to an appropriate failure message.
+ *
+ *              Do nothing if pass is FALSE on entry.
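+ *
+ *              Schematically, the pattern used below is:
+ *
+ *                  if (mpi_rank == 0)
+ *                      serial_insert_cache_image(file_name_idx, mpi_size);
+ *                  MPI_Barrier(MPI_COMM_WORLD);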
+ * + * Return: void + * + * Programmer: John Mainzer + * 3/8/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size) +{ + if (pass) { + + if (mpi_rank == 0) { /* insert cache image in supplied test file */ + + if (!serial_insert_cache_image(file_name_idx, mpi_size)) { + HDfprintf(stderr, "\n\nCache image insertion failed.\n"); + HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg); + HDexit(EXIT_FAILURE); + } + } + } + + if (pass) { + + /* make sure insertion of the cache image is complete + * before proceeding + */ + MPI_Barrier(MPI_COMM_WORLD); + } + + return; + +} /* par_insert_cache_image() */ + +/*------------------------------------------------------------------------- + * Function: par_verify_dataset() + * + * Purpose: Collectively verify the contents of a chunked dataset. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 3/6/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank) +{ + const char *fcn_name = "par_verify_dataset()"; + char dset_name[256]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l; + int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE]; + hsize_t dims[3]; + hsize_t a_size[3]; + hsize_t offset[3]; + hid_t status; + hid_t memspace_id = -1; + hid_t dset_id = -1; + hid_t filespace_id = -1; + hid_t dxpl_id = -1; + + show_progress = (show_progress && (mpi_rank == 0)); + verbose = (verbose && (mpi_rank == 0)); + + HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num); + + if (show_progress) { + HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name); + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + } + + if (pass) { + + /* open the dataset */ + + dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT); + + if (dset_id < 0) { + + pass = FALSE; + failure_mssg = "H5Dopen2() failed."; + } + } + + /* get the file space ID */ + if (pass) { + + filespace_id = H5Dget_space(dset_id); + + if (filespace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* create the mem space to be used to read */ + if (pass) { + + dims[0] = 1; + dims[1] = CHUNK_SIZE; + dims[2] = CHUNK_SIZE; + memspace_id = H5Screate_simple(3, dims, NULL); + + if (memspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* select in memory hyperslab */ + if (pass) { + + offset[0] = 0; /* offset of hyperslab in memory */ + offset[1] = 0; + offset[2] = 0; + a_size[0] = 1; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + a_size[2] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the DXPL for collective I/O */ + if (pass) { + + dxpl_id = 
H5Pcreate(H5P_DATASET_XFER); + + if (dxpl_id < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_dxpl_mpio() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* read data from data sets and validate it */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + /* select on disk hyperslab */ + offset[0] = (hsize_t)mpi_rank; + offset[1] = (hsize_t)i; /* offset of hyperslab in file */ + offset[2] = (hsize_t)j; + a_size[0] = (hsize_t)1; + a_size[1] = CHUNK_SIZE; /* size of hyperslab */ + a_size[2] = CHUNK_SIZE; + + status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if (pass) { + + status = H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, dxpl_id, data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "chunk read failed."; + } + } + + /* validate the slab */ + if (pass) { + + valid_chunk = TRUE; + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + if (data_chunk[0][k][l] != + ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + l + dset_num)) { + + valid_chunk = FALSE; + + if (verbose) { + + HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l, + data_chunk[0][k][l], + ((DSET_SIZE * DSET_SIZE * mpi_rank) + (DSET_SIZE * (i + k)) + j + + l + dset_num)); + HDfprintf(stdout, "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n", dset_num, + i, j, k, l); + } + } + } + } + + if (!valid_chunk) { + + pass = FALSE; + failure_mssg = "slab validation failed."; + + if (verbose) { + + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, dset_num); + } + } + } + j += CHUNK_SIZE; + } + i += CHUNK_SIZE; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* close the file space */ + if ((pass) && (H5Sclose(filespace_id) < 0)) { + + pass = FALSE; + failure_mssg = "H5Sclose(filespace_id) failed."; + } + + /* close the dataset */ + if ((pass) && (H5Dclose(dset_id) < 0)) { + + pass = FALSE; + failure_mssg = "H5Dclose(dset_id) failed."; + } + + /* close the mem space */ + if ((pass) && (H5Sclose(memspace_id) < 0)) { + + pass = FALSE; + failure_mssg = "H5Sclose(memspace_id) failed."; + } + + /* close the data access property list */ + if ((pass) && (H5Pclose(dxpl_id) < 0)) { + + pass = FALSE; + failure_mssg = "H5Pclose(dxpl) failed."; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + return; + +} /* par_verify_dataset() */ + +/*------------------------------------------------------------------------- + * Function: serial_insert_cache_image() + * + * Purpose: Insert a cache image in the supplied file. + * + * To populate the cache image, validate the contents + * of the file before closing. + * + * On failure, print an appropriate error message and + * return FALSE. + * + * Return: TRUE if succussful, FALSE otherwise. + * + * Programmer: John Mainzer + * 3/8/17 + * + * Modifications: + * + * None. 
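+ *
+ *              Note that no explicit "write image" call is required:
+ *              opening the file R/W with the cache image FAPL entry set
+ *              and then closing it suffices, since the metadata cache
+ *              image block is written during file close.  In outline:
+ *
+ *                  file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ *                  ... verify all datasets, populating the cache ...
+ *                  H5Fclose(file_id);    <-- image block written here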
+ * + *------------------------------------------------------------------------- + */ + +static hbool_t +serial_insert_cache_image(int file_name_idx, int mpi_size) +{ + const char *fcn_name = "serial_insert_cache_image()"; + char filename[512]; + hbool_t show_progress = FALSE; + int cp = 0; + int i; + int num_dsets = PAR_NUM_DSETS; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + MPI_Comm dummy_comm = MPI_COMM_WORLD; + MPI_Info dummy_info = MPI_INFO_NULL; + + pass = TRUE; + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) setup the file name */ + if (pass) { + + HDassert(FILENAMES[file_name_idx]); + + if (h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + HDfprintf(stdout, "h5_fixname() failed.\n"); + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Open the PHDF5 file with the cache image FAPL entry. + */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ TRUE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ dummy_comm, + /* info */ dummy_info, + /* l_facc_type */ 0, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ 1); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Validate contents of the file */ + + i = 0; + while ((pass) && (i < num_dsets)) { + + serial_verify_dataset(i, file_id, mpi_size); + i++; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Close the file */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + return pass; + +} /* serial_insert_cache_image() */ + +/*------------------------------------------------------------------------- + * Function: serial_verify_dataset() + * + * Purpose: Verify the contents of a chunked dataset. + * + * On failure, set pass to FALSE, and set failure_mssg + * to point to an appropriate failure message. + * + * Do nothing if pass is FALSE on entry. + * + * Return: void + * + * Programmer: John Mainzer + * 3/6/17 + * + * Modifications: + * + * None. 
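+ *
+ *              For reference, par_create_dataset() stores the value
+ *
+ *                  (DSET_SIZE * DSET_SIZE * r) + (DSET_SIZE * x) + y + n
+ *
+ *              in element (r, x, y) of dataset n, where r is the
+ *              writing process's mpi_rank; the checks below recompute
+ *              that value for each element read.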
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
+{
+    const char *fcn_name = "serial_verify_dataset()";
+    char        dset_name[256];
+    hbool_t     show_progress = FALSE;
+    hbool_t     valid_chunk;
+    hbool_t     verbose = FALSE;
+    int         cp      = 0;
+    int         i, j, k, l, m;
+    int         data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+    hsize_t     dims[3];
+    hsize_t     a_size[3];
+    hsize_t     offset[3];
+    hid_t       status;
+    hid_t       memspace_id  = -1;
+    hid_t       dset_id      = -1;
+    hid_t       filespace_id = -1;
+
+    HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
+
+    if (show_progress) {
+        HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+    }
+
+    if (pass) {
+
+        /* open the dataset */
+
+        dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+        if (dset_id < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Dopen2() failed.";
+        }
+    }
+
+    /* get the file space ID */
+    if (pass) {
+
+        filespace_id = H5Dget_space(dset_id);
+
+        if (filespace_id < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Dget_space() failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* create the mem space to be used to read */
+    if (pass) {
+
+        dims[0]     = 1;
+        dims[1]     = CHUNK_SIZE;
+        dims[2]     = CHUNK_SIZE;
+        memspace_id = H5Screate_simple(3, dims, NULL);
+
+        if (memspace_id < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Screate_simple() failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* select in memory hyperslab */
+    if (pass) {
+
+        offset[0] = 0; /* offset of hyperslab in memory */
+        offset[1] = 0;
+        offset[2] = 0;
+        a_size[0] = 1; /* size of hyperslab */
+        a_size[1] = CHUNK_SIZE;
+        a_size[2] = CHUNK_SIZE;
+        status    = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
+
+        if (status < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Sselect_hyperslab() failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* read data from data sets and validate it */
+    i = 0;
+    while ((pass) && (i < mpi_size)) {
+        j = 0;
+        while ((pass) && (j < DSET_SIZE)) {
+            k = 0;
+            while ((pass) && (k < DSET_SIZE)) {
+                /* select on disk hyperslab */
+                offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+                offset[1] = (hsize_t)j;
+                offset[2] = (hsize_t)k;
+                a_size[0] = (hsize_t)1; /* size of hyperslab */
+                a_size[1] = CHUNK_SIZE;
+                a_size[2] = CHUNK_SIZE;
+
+                status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL);
+
+                if (status < 0) {
+
+                    pass         = FALSE;
+                    failure_mssg = "disk hyperslab create failed.";
+                }
+
+                /* read the chunk from file */
+                if (pass) {
+
+                    status =
+                        H5Dread(dset_id, H5T_NATIVE_INT, memspace_id, filespace_id, H5P_DEFAULT, data_chunk);
+
+                    if (status < 0) {
+
+                        pass         = FALSE;
+                        failure_mssg = "chunk read failed.";
+                    }
+                }
+
+                /* validate the slab */
+                if (pass) {
+
+                    valid_chunk = TRUE;
+
+                    for (l = 0; l < CHUNK_SIZE; l++) {
+                        for (m = 0; m < CHUNK_SIZE; m++) {
+                            if (data_chunk[0][l][m] !=
+                                ((DSET_SIZE * DSET_SIZE * i) + (DSET_SIZE * (j + l)) + k + m + dset_num)) {
+
+                                valid_chunk = FALSE;
+
+                                if (verbose) {
+
+                                    HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", l, m,
+                                              data_chunk[0][l][m],
+                                              ((DSET_SIZE * DSET_SIZE * i) + (DSET_SIZE * (j + l)) + k + m +
+                                               dset_num));
+                                    HDfprintf(stdout,
+                                              "dset_num = 
%d, i = %d, j = %d, k = %d, l = %d, m = %d\n",
+                                              dset_num, i, j, k, l, m);
+                                }
+                            }
+                        }
+                    }
+
+                    if (!valid_chunk) {
+
+                        pass         = FALSE;
+                        failure_mssg = "slab validation failed.";
+
+                        if (verbose) {
+
+                            HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", j, k, dset_num);
+                        }
+                    }
+                }
+                k += CHUNK_SIZE;
+            }
+            j += CHUNK_SIZE;
+        }
+        i++;
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* close the file space */
+    if ((pass) && (H5Sclose(filespace_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Sclose(filespace_id) failed.";
+    }
+
+    /* close the dataset */
+    if ((pass) && (H5Dclose(dset_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Dclose(dset_id) failed.";
+    }
+
+    /* close the mem space */
+    if ((pass) && (H5Sclose(memspace_id) < 0)) {
+
+        pass         = FALSE;
+        failure_mssg = "H5Sclose(memspace_id) failed.";
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    return;
+
+} /* serial_verify_dataset() */
+
+/*-------------------------------------------------------------------------
+ * Function:    verify_data_sets()
+ *
+ * Purpose:     If pass is TRUE on entry, verify that the data sets in the
+ *              file exist and contain the expected data.
+ *
+ *              Note that these data sets were created by
+ *              create_data_sets() above.  Thus any changes in that
+ *              function must be reflected in this function, and
+ *              vice versa.
+ *
+ *              On failure, set pass to FALSE, and set failure_mssg
+ *              to point to an appropriate failure message.
+ *
+ *              Do nothing if pass is FALSE on entry.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              7/15/15
+ *
+ * Modifications:
+ *
+ *              Added min_dset and max_dset parameters and supporting
+ *              code.  This allows the caller to specify a range of
+ *              datasets to verify.
+ * JRM -- 8/20/15 + * + *------------------------------------------------------------------------- + */ + +static void +verify_data_sets(hid_t file_id, int min_dset, int max_dset) +{ + const char *fcn_name = "verify_data_sets()"; + char dset_name[64]; + hbool_t show_progress = FALSE; + hbool_t valid_chunk; + hbool_t verbose = FALSE; + int cp = 0; + int i, j, k, l, m; + int data_chunk[CHUNK_SIZE][CHUNK_SIZE]; + herr_t status; + hid_t filespace_ids[MAX_NUM_DSETS]; + hid_t memspace_id = -1; + hid_t dataset_ids[MAX_NUM_DSETS]; + hsize_t dims[2]; + hsize_t a_size[2]; + hsize_t offset[2]; + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + HDassert(0 <= min_dset); + HDassert(min_dset <= max_dset); + HDassert(max_dset < MAX_NUM_DSETS); + + /* open the datasets */ + + if (pass) { + + i = min_dset; + + while ((pass) && (i <= max_dset)) { + /* open the dataset */ + if (pass) { + + HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i); + dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT); + + if (dataset_ids[i] < 0) { + + pass = FALSE; + failure_mssg = "H5Dopen2() failed."; + } + } + + /* get the file space ID */ + if (pass) { + + filespace_ids[i] = H5Dget_space(dataset_ids[i]); + + if (filespace_ids[i] < 0) { + + pass = FALSE; + failure_mssg = "H5Dget_space() failed."; + } + } + + i++; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* create the mem space to be used to read and write chunks */ + if (pass) { + + dims[0] = CHUNK_SIZE; + dims[1] = CHUNK_SIZE; + memspace_id = H5Screate_simple(2, dims, NULL); + + if (memspace_id < 0) { + + pass = FALSE; + failure_mssg = "H5Screate_simple() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* select in memory hyperslab */ + if (pass) { + + offset[0] = 0; /*offset of hyperslab in memory*/ + offset[1] = 0; + a_size[0] = CHUNK_SIZE; /*size of hyperslab*/ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* read data from data sets and validate it */ + i = 0; + while ((pass) && (i < DSET_SIZE)) { + j = 0; + while ((pass) && (j < DSET_SIZE)) { + m = min_dset; + while ((pass) && (m <= max_dset)) { + + /* select on disk hyperslab */ + offset[0] = (hsize_t)i; /* offset of hyperslab in file */ + offset[1] = (hsize_t)j; + a_size[0] = CHUNK_SIZE; /* size of hyperslab */ + a_size[1] = CHUNK_SIZE; + status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET, offset, NULL, a_size, NULL); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + + /* read the chunk from file */ + if (pass) { + + status = H5Dread(dataset_ids[m], H5T_NATIVE_INT, memspace_id, filespace_ids[m], + H5P_DEFAULT, data_chunk); + + if (status < 0) { + + pass = FALSE; + failure_mssg = "disk hyperslab create failed."; + } + } + + /* validate the slab */ + if (pass) { + + valid_chunk = TRUE; + for (k = 0; k < CHUNK_SIZE; k++) { + for (l = 0; l < CHUNK_SIZE; l++) { + if (data_chunk[k][l] != + ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)) { + + valid_chunk = FALSE; + + if (verbose) { + + HDfprintf(stdout, "data_chunk[%0d][%0d] = %0d, expect %0d.\n", k, l, + data_chunk[k][l], + ((DSET_SIZE * DSET_SIZE * m) + (DSET_SIZE * (i + k)) + j + l)); + HDfprintf(stdout, "m = %d, i = 
%d, j = %d, k = %d, l = %d\n", m, i, j, k, + l); + } + } + } + } + + if (!valid_chunk) { + + pass = FALSE; + failure_mssg = "slab validation failed."; + + if (verbose) { + + HDfprintf(stdout, "Chunk (%0d, %0d) in /dset%03d is invalid.\n", i, j, m); + } + } + } + m++; + } + j += CHUNK_SIZE; + } + i += CHUNK_SIZE; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++); + + /* close the file spaces */ + i = min_dset; + while ((pass) && (i <= max_dset)) { + if (H5Sclose(filespace_ids[i]) < 0) { + + pass = FALSE; + failure_mssg = "H5Sclose() failed."; + } + i++; + } + + /* close the datasets */ + i = min_dset; + while ((pass) && (i <= max_dset)) { + if (H5Dclose(dataset_ids[i]) < 0) { + + pass = FALSE; + failure_mssg = "H5Dclose() failed."; + } + i++; + } + + /* close the mem space */ + if (pass) { + + if (H5Sclose(memspace_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Sclose(memspace_id) failed."; + } + } + + return; + +} /* verify_data_sets() */ + +/****************************************************************************/ +/******************************* Test Functions *****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: verify_cache_image_RO() + * + * Purpose: Verify that a HDF5 file containing a cache image is + * opened R/O and read correctly by PHDF5 with the specified + * metadata write strategy. + * + * Basic cycle of operation is as follows: + * + * 1) Open the test file created at the beginning of this + * test read only. + * + * Verify that the file contains a cache image. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + * + * 2) Verify that the file contains the expected data. + * + * 3) Close the file. + * + * 4) Open the file R/O, and verify that it still contains + * a cache image. + * + * 5) Verify that the file contains the expected data. + * + * 6) Close the file. + * + * Return: void + * + * Programmer: John Mainzer + * 3/11/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank) +{ + const char *fcn_name = "verify_cache_image_RO()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + + pass = TRUE; + + if (mpi_rank == 0) { + + switch (md_write_strat) { + + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + TESTING("parallel CI load test -- proc0 md write -- R/O"); + break; + + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + TESTING("parallel CI load test -- dist md write -- R/O"); + break; + + default: + TESTING("parallel CI load test -- unknown md write -- R/o"); + pass = FALSE; + break; + } + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the file name */ + if (pass) { + + if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file created at the beginning of this test. 
+     *
+     *    Verify that the file contains a cache image.
+     */
+
+    if (pass) {
+
+        open_hdf5_file(/* create_file           */ FALSE,
+                       /* mdci_sbem_expected    */ TRUE,
+                       /* read_only             */ TRUE,
+                       /* set_mdci_fapl         */ FALSE,
+                       /* config_fsm            */ FALSE,
+                       /* enable_page_buffer    */ FALSE,
+                       /* hdf_file_name         */ filename,
+                       /* cache_image_flags     */ H5C_CI__ALL_FLAGS,
+                       /* file_id_ptr           */ &file_id,
+                       /* file_ptr_ptr          */ &file_ptr,
+                       /* cache_ptr_ptr         */ &cache_ptr,
+                       /* comm                  */ MPI_COMM_WORLD,
+                       /* info                  */ MPI_INFO_NULL,
+                       /* l_facc_type           */ FACC_MPIO,
+                       /* all_coll_metadata_ops */ FALSE,
+                       /* coll_metadata_write   */ FALSE,
+                       /* md_write_strat        */ md_write_strat);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) Verify that the file contains the expected data.
+     *
+     *    Verify that only process 0 reads the cache image.
+     *
+     *    Verify that all other processes receive the cache
+     *    image block from process 0.
+     */
+
+    if (pass) {
+
+        verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+    }
+
+    /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+            ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
+
+            pass         = FALSE;
+            failure_mssg = "unexpected images_read.";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* Verify that all other processes receive the cache image block
+     * from process 0.
+     *
+     * Since we have already verified that only process 0 has read the
+     * image, it is sufficient to verify that the image was loaded on
+     * all processes.
+     */
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (cache_ptr->images_loaded != 1) {
+
+            pass         = FALSE;
+            failure_mssg = "Image not loaded?.";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 3) Close the file. */
+
+    if (pass) {
+
+        if (H5Fclose(file_id) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Fclose() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) Open the file R/O again, and verify that it still contains a cache image. */
+
+    if (pass) {
+
+        open_hdf5_file(/* create_file           */ FALSE,
+                       /* mdci_sbem_expected    */ TRUE,
+                       /* read_only             */ TRUE,
+                       /* set_mdci_fapl         */ FALSE,
+                       /* config_fsm            */ FALSE,
+                       /* enable_page_buffer    */ FALSE,
+                       /* hdf_file_name         */ filename,
+                       /* cache_image_flags     */ H5C_CI__ALL_FLAGS,
+                       /* file_id_ptr           */ &file_id,
+                       /* file_ptr_ptr          */ &file_ptr,
+                       /* cache_ptr_ptr         */ &cache_ptr,
+                       /* comm                  */ MPI_COMM_WORLD,
+                       /* info                  */ MPI_INFO_NULL,
+                       /* l_facc_type           */ FACC_MPIO,
+                       /* all_coll_metadata_ops */ FALSE,
+                       /* coll_metadata_write   */ FALSE,
+                       /* md_write_strat        */ md_write_strat);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) Verify that the file contains the expected data. */
+
+    if (pass) {
+
+        verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+    }
+
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (cache_ptr->images_loaded != 1) {
+
+            pass         = FALSE;
+            failure_mssg = "metadata cache image block not loaded(2).";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    /* 6) Close the file.
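+     *
+     *    (Note: for files opened with the MPI-I/O file driver,
+     *    H5Fclose() is a collective operation -- every rank must make
+     *    the call.)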
*/ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return !pass; + +} /* verify_cache_image_RO() */ + +/*------------------------------------------------------------------------- + * Function: verify_cache_image_RW() + * + * Purpose: Verify that a HDF5 file containing a cache image is + * opened and read correctly by PHDF5 with the specified + * metadata write strategy. + * + * Basic cycle of operation is as follows: + * + * 1) Open the test file created at the beginning of this + * test. + * + * Verify that the file contains a cache image. + * + * 2) Verify that the file contains the expected data. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + * + * + * 3) Close the file. + * + * 4) Open the file, and verify that it doesn't contain + * a cache image. + * + * 5) Verify that the file contains the expected data. + * + * 6) Close the file. + * + * 7) Delete the file. + * + * Return: void + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank) +{ + const char *fcn_name = "verify_cache_imageRW()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + + pass = TRUE; + + if (mpi_rank == 0) { + + switch (md_write_strat) { + + case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY: + TESTING("parallel CI load test -- proc0 md write -- R/W"); + break; + + case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED: + TESTING("parallel CI load test -- dist md write -- R/W"); + break; + + default: + TESTING("parallel CI load test -- unknown md write -- R/W"); + pass = FALSE; + break; + } + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the file name */ + if (pass) { + + if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file created at the beginning of this test. + * + * Verify that the file contains a cache image. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. 
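+     *
+     *    (Schematically, the pattern being verified is the following --
+     *    an illustrative sketch only; the real logic lives inside the
+     *    library, and image_buf / image_len are stand-ins for the image
+     *    block and its size:
+     *
+     *        if (mpi_rank == 0)
+     *            read_image_from_file(image_buf, image_len);  // rank 0 only
+     *        MPI_Bcast(image_buf, (int)image_len, MPI_BYTE, 0, MPI_COMM_WORLD);
+     *    )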
+ */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ FALSE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ MPI_COMM_WORLD, + /* info */ MPI_INFO_NULL, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ md_write_strat); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Verify that the file contains the expected data. + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + */ + if (pass) { + + verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1); + } + + /* Verify that only process 0 reads the cache image. */ +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) || + ((mpi_rank > 0) && (cache_ptr->images_read != 0))) { + + pass = FALSE; + failure_mssg = "unexpected images_read."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Verify that all other processes receive the cache image block + * from process 0. + * + * Since we have already verified that only process 0 has read the + * image, it is sufficient to verify that the image was loaded on + * all processes. + */ +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (cache_ptr->images_loaded != 1) { + + pass = FALSE; + failure_mssg = "Image not loaded?."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Close the file. */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Open the file, and verify that it doesn't contain a cache image. */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ MPI_COMM_WORLD, + /* info */ MPI_INFO_NULL, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ FALSE, + /* md_write_strat */ md_write_strat); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Verify that the file contains the expected data. */ + + if (pass) { + + verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1); + } + +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (cache_ptr->images_loaded != 0) { + + pass = FALSE; + failure_mssg = "metadata cache image block loaded(1)."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + /* 6) Close the file. */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Delete the file. 
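+     *
+     *    (Close-before-unlink: the MPI_Barrier() below guarantees that
+     *    no rank still has the file open when process 0 removes it.)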
*/ + + if (pass) { + + /* wait for everyone to close the file */ + MPI_Barrier(MPI_COMM_WORLD); + + if ((mpi_rank == 0) && (HDremove(filename) < 0)) { + + pass = FALSE; + failure_mssg = "HDremove() failed.\n"; + } + } + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return !pass; + +} /* verify_cache_imageRW() */ + +/***************************************************************************** + * + * Function: smoke_check_1() + * + * Purpose: Initial smoke check to verify correct behaviour of cache + * image in combination with parallel. + * + * As cache image is currently disabled in the parallel case, + * we construct a test file in parallel, verify it in serial + * and generate a cache image in passing, and then verify + * it again in parallel. + * + * In passing, also verify that page buffering is silently + * disabled in the parallel case. Needless to say, this part + * of the test will have to be re-worked when and if page + * buffering is supported in parallel. + * + * Return: Success: TRUE + * + * Failure: FALSE + * + * Programmer: JRM -- 3/6/17 + * + *****************************************************************************/ +static hbool_t +smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size) +{ + const char *fcn_name = "smoke_check_1()"; + char filename[512]; + hbool_t show_progress = FALSE; + hid_t file_id = -1; + H5F_t *file_ptr = NULL; + H5C_t *cache_ptr = NULL; + int cp = 0; + int i; + int num_dsets = PAR_NUM_DSETS; + int test_file_index = 2; + h5_stat_size_t file_size; + + pass = TRUE; + + if (mpi_rank == 0) { + + TESTING("parallel cache image smoke check 1"); + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the file name */ + if (pass) { + + HDassert(FILENAMES[test_file_index]); + + if (h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Create a PHDF5 file without the cache image FAPL entry. 
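+     *
+     *    (For contrast, a file that did request a cache image would
+     *    attach an image configuration to its FAPL first -- roughly:
+     *
+     *        H5AC_cache_image_config_t cfg = {
+     *            H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+     *            TRUE,   // generate_image
+     *            FALSE,  // save_resize_status
+     *            H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+     *        H5Pset_mdc_image_config(fapl_id, &cfg);
+     *
+     *    This test deliberately omits that step.)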
+ * + * Verify that a cache image is not requested + */ + + if (pass) { + + open_hdf5_file(/* create_file */ TRUE, + /* mdci_sbem_expected */ FALSE, + /* read_only */ FALSE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ TRUE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ mpi_comm, + /* info */ mpi_info, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ TRUE, + /* md_write_strat */ 1); + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Create datasets in the file */ + + i = 0; + while ((pass) && (i < num_dsets)) { + + par_create_dataset(i, file_id, mpi_rank, mpi_size); + i++; + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Verify the datasets in the file */ + + i = 0; + while ((pass) && (i < num_dsets)) { + + par_verify_dataset(i, file_id, mpi_rank); + i++; + } + + /* 4) Close the file */ + + if (pass) { + + if (H5Fclose(file_id) < 0) { + + pass = FALSE; + failure_mssg = "H5Fclose() failed.\n"; + } + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5 Insert a cache image into the file */ + + if (pass) { + + par_insert_cache_image(test_file_index, mpi_rank, mpi_size); + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Open the file R/O */ + + if (pass) { + + open_hdf5_file(/* create_file */ FALSE, + /* mdci_sbem_expected */ TRUE, + /* read_only */ TRUE, + /* set_mdci_fapl */ FALSE, + /* config_fsm */ FALSE, + /* enable_page_buffer */ FALSE, + /* hdf_file_name */ filename, + /* cache_image_flags */ H5C_CI__ALL_FLAGS, + /* file_id_ptr */ &file_id, + /* file_ptr_ptr */ &file_ptr, + /* cache_ptr_ptr */ &cache_ptr, + /* comm */ mpi_comm, + /* info */ mpi_info, + /* l_facc_type */ FACC_MPIO, + /* all_coll_metadata_ops */ FALSE, + /* coll_metadata_write */ TRUE, + /* md_write_strat */ 1); + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Verify the datasets in the file backwards + * + * Verify that only process 0 reads the cache image. + * + * Verify that all other processes receive the cache + * image block from process 0. + */ + + i = num_dsets - 1; + while ((pass) && (i >= 0)) { + + par_verify_dataset(i, file_id, mpi_rank); + i--; + } + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Verify that only process 0 reads the cache image. */ +#if H5C_COLLECT_CACHE_STATS + if (pass) { + + if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) || + ((mpi_rank > 0) && (cache_ptr->images_read != 0))) { + + pass = FALSE; + failure_mssg = "unexpected images_read."; + } + } +#endif /* H5C_COLLECT_CACHE_STATS */ + + if ((mpi_rank == 0) && (show_progress)) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Verify that all other processes receive the cache image block + * from process 0. + * + * Since we have already verified that only process 0 has read the + * image, it is sufficient to verify that the image was loaded on + * all processes. 
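+     *
+     *    (images_read and images_loaded are cache statistics fields
+     *    that exist only when the library is built with
+     *    H5C_COLLECT_CACHE_STATS -- hence the #if guards around these
+     *    checks.)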
+     */
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (cache_ptr->images_loaded != 1) {
+
+            pass         = FALSE;
+            failure_mssg = "Image not loaded?.";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 8) Close the file */
+
+    if (pass) {
+
+        if (H5Fclose(file_id) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Fclose() failed.";
+        }
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 9) Open the file */
+
+    if (pass) {
+
+        open_hdf5_file(/* create_file           */ FALSE,
+                       /* mdci_sbem_expected    */ TRUE,
+                       /* read_only             */ FALSE,
+                       /* set_mdci_fapl         */ FALSE,
+                       /* config_fsm            */ FALSE,
+                       /* enable_page_buffer    */ FALSE,
+                       /* hdf_file_name         */ filename,
+                       /* cache_image_flags     */ H5C_CI__ALL_FLAGS,
+                       /* file_id_ptr           */ &file_id,
+                       /* file_ptr_ptr          */ &file_ptr,
+                       /* cache_ptr_ptr         */ &cache_ptr,
+                       /* comm                  */ mpi_comm,
+                       /* info                  */ mpi_info,
+                       /* l_facc_type           */ FACC_MPIO,
+                       /* all_coll_metadata_ops */ FALSE,
+                       /* coll_metadata_write   */ TRUE,
+                       /* md_write_strat        */ 1);
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 10) Verify the datasets in the file
+     *
+     *     Verify that only process 0 reads the cache image.
+     *
+     *     Verify that all other processes receive the cache
+     *     image block from process 0.
+     */
+
+    i = 0;
+    while ((pass) && (i < num_dsets)) {
+
+        par_verify_dataset(i, file_id, mpi_rank);
+        i++;
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (((mpi_rank == 0) && (cache_ptr->images_read != 1)) ||
+            ((mpi_rank > 0) && (cache_ptr->images_read != 0))) {
+
+            pass         = FALSE;
+            failure_mssg = "unexpected images_read.";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* Verify that all other processes receive the cache image block
+     * from process 0.
+     *
+     * Since we have already verified that only process 0 has read the
+     * image, it is sufficient to verify that the image was loaded on
+     * all processes.
+     */
+#if H5C_COLLECT_CACHE_STATS
+    if (pass) {
+
+        if (cache_ptr->images_loaded != 1) {
+
+            pass         = FALSE;
+            failure_mssg = "Image not loaded?.";
+        }
+    }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 11) Delete the datasets in the file */
+
+    i = 0;
+    while ((pass) && (i < num_dsets)) {
+
+        par_delete_dataset(i, file_id, mpi_rank);
+        i++;
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 12) Close the file */
+
+    if (pass) {
+
+        if (H5Fclose(file_id) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5Fclose() failed.";
+        }
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 13) Get the size of the file. Verify that it is less
+     *     than 20 KB. Without deletions and persistent free
+     *     space managers, the file size is about 30 MB, so this
+     *     is sufficient to verify that the persistent free
+     *     space managers are more or less doing their job.
+     *
+     *     Note that this test will have to change if we use
+     *     a larger page size.
+     */
+    if (pass) {
+
+        if ((file_size = h5_get_file_size(filename, H5P_DEFAULT)) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "h5_get_file_size() failed.";
+        }
+        else if (file_size > 20 * 1024) {
+
+            pass         = FALSE;
+            failure_mssg = "unexpectedly large file size.";
+        }
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 14) Delete the file */
+
+    if (pass) {
+
+        /* wait for everyone to close the file */
+        MPI_Barrier(MPI_COMM_WORLD);
+
+        if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
+
+            pass         = FALSE;
+            failure_mssg = "HDremove() failed.\n";
+        }
+    }
+
+    if ((mpi_rank == 0) && (show_progress))
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* report results */
+    if (mpi_rank == 0) {
+
+        if (pass) {
+
+            PASSED();
+        }
+        else {
+
+            H5_FAILED();
+
+            HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+        }
+    }
+
+    return !pass;
+
+} /* smoke_check_1() */
+
+/*-------------------------------------------------------------------------
+ * Function:    main
+ *
+ * Purpose:     Run parallel tests on the cache image feature.
+ *
+ *              At present, cache image is disabled in parallel, and
+ *              thus these tests are restricted to verifying that a
+ *              file with a cache image can be opened in the parallel
+ *              case, and verifying that instructions to create a
+ *              cache image are ignored in the parallel case.
+ *
+ *              WARNING: This test uses fork() and execve(), and
+ *              therefore will not run on Windows.
+ *
+ * Return:      Success: 0
+ *
+ *              Failure: 1
+ *
+ * Programmer:  John Mainzer
+ *              1/25/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+    unsigned nerrs = 0;
+    MPI_Comm comm  = MPI_COMM_WORLD;
+    MPI_Info info  = MPI_INFO_NULL;
+    int      mpi_size;
+    int      mpi_rank;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    /* Attempt to turn off atexit post processing so that in case errors
+     * happen during the test and the process is aborted, it will not
+     * hang in the atexit post processing in which it may try to make MPI
+     * calls. By then, MPI calls may not work.
+     */
+    if (H5dont_atexit() < 0)
+        HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
+
+    H5open();
+
+    if (mpi_rank == 0) {
+        HDprintf("===================================\n");
+        HDprintf("Parallel metadata cache image tests\n");
+        HDprintf(" mpi_size = %d\n", mpi_size);
+        HDprintf("===================================\n");
+    }
+
+    if (mpi_size < 2) {
+        if (mpi_rank == 0)
+            HDprintf(" Need at least 2 processes. Exiting.\n");
+        goto finish;
+    }
+
+    if (mpi_rank == 0) { /* create test files */
+        int i;
+
+        HDfprintf(stdout, "Constructing test files: \n");
+        HDfflush(stdout);
+
+        i = 0;
+        while ((FILENAMES[i] != NULL) && (i < TEST_FILES_TO_CONSTRUCT)) {
+            HDfprintf(stdout, " writing %s ... 
", FILENAMES[i]); + HDfflush(stdout); + construct_test_file(i); + + if (pass) { + HDprintf("done.\n"); + HDfflush(stdout); + } + else { + HDprintf("failed.\n"); + HDexit(EXIT_FAILURE); + } + i++; + } + HDfprintf(stdout, "Test file construction complete.\n"); + } + + /* can't start test until test files exist */ + MPI_Barrier(MPI_COMM_WORLD); + + nerrs += verify_cache_image_RO(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank); + nerrs += verify_cache_image_RO(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank); + nerrs += verify_cache_image_RW(0, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank); + nerrs += verify_cache_image_RW(1, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank); + nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size); + +finish: + + /* make sure all processes are finished before final report, cleanup + * and exit. + */ + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_rank == 0) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrs > 0) + HDprintf("***metadata cache image tests detected %d failures***\n", nerrs); + else + HDprintf("metadata cache image tests finished with no failures\n"); + HDprintf("===================================\n"); + } + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrs) because exit code is limited to 1byte */ + return (nerrs > 0); + +} /* main() */ diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c index 05fd2fc..ac5b90b 100644 --- a/testpar/t_chunk_alloc.c +++ b/testpar/t_chunk_alloc.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -22,23 +19,22 @@ */ #include "testphdf5.h" -static int mpi_size, mpi_rank; +static int mpi_size, mpi_rank; -#define DSET_NAME "ExtendibleArray" -#define CHUNK_SIZE 1000 /* #elements per chunk */ -#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ -#define CLOSE 1 -#define NO_CLOSE 0 +#define DSET_NAME "ExtendibleArray" +#define CHUNK_SIZE 1000 /* #elements per chunk */ +#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */ +#define CLOSE 1 +#define NO_CLOSE 0 static MPI_Offset get_filesize(const char *filename) { - int mpierr; - MPI_File fd; - MPI_Offset filesize; + int mpierr; + MPI_File fd; + MPI_Offset filesize; - mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, - MPI_INFO_NULL, &fd); + mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd); VRFY((mpierr == MPI_SUCCESS), ""); mpierr = MPI_File_get_size(fd, &filesize); @@ -47,21 +43,12 @@ get_filesize(const char *filename) mpierr = MPI_File_close(&fd); VRFY((mpierr == MPI_SUCCESS), ""); - return(filesize); + return (filesize); } -typedef enum write_pattern { - none, - sec_last, - all -} write_type; - -typedef enum access_ { - write_all, - open_only, - extend_only -} access_type; +typedef enum write_pattern { none, sec_last, all } write_type; +typedef enum access_ { write_all, open_only, extend_only } access_type; /* * This creates a dataset serially with chunks, each of CHUNK_SIZE @@ -71,98 +58,97 @@ typedef enum access_ { static void create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern) { - hid_t file_id, dataset; /* handles */ - hid_t dataspace,memspace; - hid_t cparms; - hsize_t dims[1]; - hsize_t maxdims[1] = {H5S_UNLIMITED}; - - hsize_t chunk_dims[1] ={CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ + hid_t file_id, dataset; /* handles */ + hid_t dataspace, memspace; + hid_t cparms; + hsize_t dims[1]; + hsize_t maxdims[1] = {H5S_UNLIMITED}; + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - long nchunks; - herr_t hrc; + char buffer[CHUNK_SIZE]; + long nchunks; + herr_t hrc; - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* Only MAINPROCESS should create the file. Others just wait. */ - if (MAINPROCESS){ - nchunks=chunk_factor*mpi_size; - dims[0]=nchunks*CHUNK_SIZE; - /* Create the data space with unlimited dimensions. */ - dataspace = H5Screate_simple (1, dims, maxdims); - VRFY((dataspace >= 0), ""); + if (MAINPROCESS) { + nchunks = chunk_factor * mpi_size; + dims[0] = (hsize_t)(nchunks * CHUNK_SIZE); + /* Create the data space with unlimited dimensions. */ + dataspace = H5Screate_simple(1, dims, maxdims); + VRFY((dataspace >= 0), ""); - memspace = H5Screate_simple(1, chunk_dims, NULL); - VRFY((memspace >= 0), ""); + memspace = H5Screate_simple(1, chunk_dims, NULL); + VRFY((memspace >= 0), ""); - /* Create a new file. 
If file exists its contents will be overwritten. */ - file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, - H5P_DEFAULT); - VRFY((file_id >= 0), "H5Fcreate"); + /* Create a new file. If file exists its contents will be overwritten. */ + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((file_id >= 0), "H5Fcreate"); - /* Modify dataset creation properties, i.e. enable chunking */ - cparms = H5Pcreate(H5P_DATASET_CREATE); - VRFY((cparms >= 0), ""); + /* Modify dataset creation properties, i.e. enable chunking */ + cparms = H5Pcreate(H5P_DATASET_CREATE); + VRFY((cparms >= 0), ""); - hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); - VRFY((hrc >= 0), ""); + hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY); + VRFY((hrc >= 0), ""); - hrc = H5Pset_chunk(cparms, 1, chunk_dims); - VRFY((hrc >= 0), ""); + hrc = H5Pset_chunk(cparms, 1, chunk_dims); + VRFY((hrc >= 0), ""); - /* Create a new dataset within the file using cparms creation properties. */ - dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); - VRFY((dataset >= 0), ""); + /* Create a new dataset within the file using cparms creation properties. */ + dataset = + H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT); + VRFY((dataset >= 0), ""); - if(write_pattern == sec_last) { + if (write_pattern == sec_last) { HDmemset(buffer, 100, CHUNK_SIZE); - count[0] = 1; + count[0] = 1; stride[0] = 1; - block[0] = chunk_dims[0]; - offset[0] = (nchunks-2)*chunk_dims[0]; + block[0] = chunk_dims[0]; + offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0]; hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); + VRFY((hrc >= 0), ""); /* Write sec_last chunk */ hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); VRFY((hrc >= 0), "H5Dwrite"); } /* end if */ - /* Close resources */ - hrc = H5Dclose (dataset); - VRFY((hrc >= 0), ""); - dataset = -1; - - hrc = H5Sclose (dataspace); - VRFY((hrc >= 0), ""); + /* Close resources */ + hrc = H5Dclose(dataset); + VRFY((hrc >= 0), ""); + dataset = -1; - hrc = H5Sclose (memspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose(dataspace); + VRFY((hrc >= 0), ""); - hrc = H5Pclose (cparms); - VRFY((hrc >= 0), ""); + hrc = H5Sclose(memspace); + VRFY((hrc >= 0), ""); - hrc = H5Fclose (file_id); - VRFY((hrc >= 0), ""); - file_id = -1; + hrc = H5Pclose(cparms); + VRFY((hrc >= 0), ""); - /* verify file size */ - filesize = get_filesize(filename); - est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char); - VRFY((filesize >= est_filesize), "file size check"); + hrc = H5Fclose(file_id); + VRFY((hrc >= 0), ""); + file_id = -1; + /* verify file size */ + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); + VRFY((filesize >= est_filesize), "file size check"); } /* Make sure all processes are done before exiting this routine. Otherwise, @@ -173,7 +159,6 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ MPI_Barrier(MPI_COMM_WORLD); } - /* * This program performs three different types of parallel access. It writes on * the entire dataset, it extends the dataset to nchunks*CHUNK_SIZE, and it only @@ -181,51 +166,52 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_ * consistent with argument 'chunk_factor'. 
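+ *
+ * In the write_all case the chunks are assigned round-robin: with
+ * nchunks = chunk_factor * mpi_size, rank r writes chunks r,
+ * r + mpi_size, r + 2 * mpi_size, ..., i.e. the selection offset used
+ * below is offset[0] = (i * mpi_size + mpi_rank) * CHUNK_SIZE.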
*/ static void -parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, hid_t *dataset) +parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id, + hid_t *dataset) { /* HDF5 gubbins */ - hid_t memspace, dataspace; /* HDF5 file identifier */ - hid_t access_plist; /* HDF5 ID for file access property list */ - herr_t hrc; /* HDF5 return code */ - hsize_t size[1]; - - hsize_t chunk_dims[1] ={CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ - hsize_t dims[1]; - hsize_t maxdims[1]; + hid_t memspace, dataspace; /* HDF5 file identifier */ + hid_t access_plist; /* HDF5 ID for file access property list */ + herr_t hrc; /* HDF5 return code */ + hsize_t size[1]; + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ + hsize_t dims[1]; + hsize_t maxdims[1]; /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - int i; - long nchunks; + char buffer[CHUNK_SIZE]; + int i; + long nchunks; /* MPI Gubbins */ - MPI_Offset filesize, /* actual file size */ - est_filesize; /* estimated file size */ + MPI_Offset filesize, /* actual file size */ + est_filesize; /* estimated file size */ /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - nchunks=chunk_factor*mpi_size; + nchunks = chunk_factor * mpi_size; /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); + access_plist = H5Pcreate(H5P_FILE_ACCESS); VRFY((access_plist >= 0), ""); hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); VRFY((hrc >= 0), ""); /* Open the file */ - if (*file_id<0){ + if (*file_id < 0) { *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); VRFY((*file_id >= 0), ""); } /* Open dataset*/ - if (*dataset<0){ + if (*dataset < 0) { *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); VRFY((*dataset >= 0), ""); } @@ -236,26 +222,26 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti dataspace = H5Dget_space(*dataset); VRFY((dataspace >= 0), ""); - size[0] = nchunks*CHUNK_SIZE; + size[0] = (hsize_t)nchunks * CHUNK_SIZE; switch (action) { /* all chunks are written by all the processes in an interleaved way*/ case write_all: - memset(buffer, mpi_rank+1, CHUNK_SIZE); - count[0] = 1; - stride[0] = 1; - block[0] = chunk_dims[0]; - for (i=0; i<nchunks/mpi_size; i++){ - offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0]; + HDmemset(buffer, mpi_rank + 1, CHUNK_SIZE); + count[0] = 1; + stride[0] = 1; + block[0] = chunk_dims[0]; + for (i = 0; i < nchunks / mpi_size; i++) { + offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0]; - hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); - VRFY((hrc >= 0), ""); + hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); + VRFY((hrc >= 0), ""); - /* Write the buffer out */ - hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); - VRFY((hrc >= 0), "H5Dwrite"); + /* Write the buffer out */ + hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer); + VRFY((hrc >= 0), "H5Dwrite"); } break; @@ -285,10 +271,10 @@ parallel_access_dataset(const char *filename, int 
chunk_factor, access_type acti VRFY((hrc >= 0), ""); *dataset = -1; - hrc = H5Sclose (dataspace); + hrc = H5Sclose(dataspace); VRFY((hrc >= 0), ""); - hrc = H5Sclose (memspace); + hrc = H5Sclose(memspace); VRFY((hrc >= 0), ""); hrc = H5Fclose(*file_id); @@ -296,8 +282,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti *file_id = -1; /* verify file size */ - filesize = get_filesize(filename); - est_filesize = nchunks*CHUNK_SIZE*sizeof(unsigned char); + filesize = get_filesize(filename); + est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char); VRFY((filesize >= est_filesize), "file size check"); /* Can close some plists */ @@ -320,45 +306,45 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti * interleaved pattern. */ static void -verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, - hid_t *file_id, hid_t *dataset) +verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id, + hid_t *dataset) { /* HDF5 gubbins */ - hid_t dataspace, memspace; /* HDF5 file identifier */ - hid_t access_plist; /* HDF5 ID for file access property list */ - herr_t hrc; /* HDF5 return code */ - - hsize_t chunk_dims[1] ={CHUNK_SIZE}; - hsize_t count[1]; - hsize_t stride[1]; - hsize_t block[1]; - hsize_t offset[1]; /* Selection offset within dataspace */ + hid_t dataspace, memspace; /* HDF5 file identifier */ + hid_t access_plist; /* HDF5 ID for file access property list */ + herr_t hrc; /* HDF5 return code */ + + hsize_t chunk_dims[1] = {CHUNK_SIZE}; + hsize_t count[1]; + hsize_t stride[1]; + hsize_t block[1]; + hsize_t offset[1]; /* Selection offset within dataspace */ /* Variables used in reading data back */ - char buffer[CHUNK_SIZE]; - int value, i; - int index_l; - long nchunks; + char buffer[CHUNK_SIZE]; + int value, i; + int index_l; + long nchunks; /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - nchunks=chunk_factor*mpi_size; + nchunks = chunk_factor * mpi_size; /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); + access_plist = H5Pcreate(H5P_FILE_ACCESS); VRFY((access_plist >= 0), ""); hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); VRFY((hrc >= 0), ""); /* Open the file */ - if (*file_id<0){ + if (*file_id < 0) { *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist); VRFY((*file_id >= 0), ""); } /* Open dataset*/ - if (*dataset<0){ + if (*dataset < 0) { *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT); VRFY((*dataset >= 0), ""); } @@ -370,14 +356,14 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in VRFY((dataspace >= 0), ""); /* all processes check all chunks. 
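+     * Expected chunk contents by write pattern (cf. the switch below):
+     * 'all' -> chunk i holds (i % mpi_size) + 1; 'sec_last' -> chunk
+     * nchunks-2 holds 100 and all others 0; 'none' -> all zeros.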
*/ - count[0] = 1; + count[0] = 1; stride[0] = 1; - block[0] = chunk_dims[0]; - for (i=0; i<nchunks; i++){ - /* reset buffer values */ - memset(buffer, -1, CHUNK_SIZE); + block[0] = chunk_dims[0]; + for (i = 0; i < nchunks; i++) { + /* reset buffer values */ + HDmemset(buffer, -1, CHUNK_SIZE); - offset[0] = i*chunk_dims[0]; + offset[0] = (hsize_t)i * chunk_dims[0]; hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block); VRFY((hrc >= 0), ""); @@ -387,40 +373,40 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in VRFY((hrc >= 0), "H5Dread"); /* set expected value according the write pattern */ - switch (write_pattern) { - case all: - value = i%mpi_size + 1; - break; - case none: - value = 0; - break; + switch (write_pattern) { + case all: + value = i % mpi_size + 1; + break; + case none: + value = 0; + break; case sec_last: - if (i==nchunks-2) - value = 100; - else - value = 0; + if (i == nchunks - 2) + value = 100; + else + value = 0; break; default: HDassert(0); - } + } /* verify content of the chunk */ for (index_l = 0; index_l < CHUNK_SIZE; index_l++) VRFY((buffer[index_l] == value), "data verification"); } - hrc = H5Sclose (dataspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose(dataspace); + VRFY((hrc >= 0), ""); - hrc = H5Sclose (memspace); - VRFY((hrc >= 0), ""); + hrc = H5Sclose(memspace); + VRFY((hrc >= 0), ""); /* Can close some plists */ hrc = H5Pclose(access_plist); VRFY((hrc >= 0), ""); /* Close up */ - if (vclose){ + if (vclose) { hrc = H5Dclose(*dataset); VRFY((hrc >= 0), ""); *dataset = -1; @@ -437,8 +423,6 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in MPI_Barrier(MPI_COMM_WORLD); } - - /* * Test following possible scenarios, * Case 1: @@ -461,17 +445,17 @@ void test_chunk_alloc(void) { const char *filename; - hid_t file_id, dataset; + hid_t file_id, dataset; file_id = dataset = -1; /* Initialize MPI */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - filename = (const char*)GetTestParameters(); + filename = (const char *)GetTestParameters(); if (VERBOSE_MED) - printf("Extend Chunked allocation test on file %s\n", filename); + HDprintf("Extend Chunked allocation test on file %s\n", filename); /* Case 1 */ /* Create chunked dataset without writing anything.*/ @@ -498,5 +482,4 @@ test_chunk_alloc(void) parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset); /* reopen dataset in parallel, read and verify the data */ verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset); - } diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index ab9de09..c6ed9b1 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. 
It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include "testphdf5.h" @@ -18,26 +15,23 @@ #define HYPER 1 #define POINT 2 -#define ALL 3 +#define ALL 3 /* some commonly used routines for collective chunk IO tests*/ -static void ccslab_set(int mpi_rank,int mpi_size,hsize_t start[],hsize_t count[], - hsize_t stride[],hsize_t block[],int mode); +static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], + hsize_t block[], int mode); -static void ccdataset_fill(hsize_t start[],hsize_t count[], - hsize_t stride[],hsize_t block[],DATATYPE*dataset, - int mem_selection); +static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + DATATYPE *dataset, int mem_selection); -static void ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE*dataset); +static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset); -static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], - hsize_t block[], DATATYPE *dataset, DATATYPE *original, - int mem_selection); - -static void coll_chunktest(const char* filename, int chunk_factor, int select_factor, - int api_option, int file_selection, int mem_selection, int mode); +static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + DATATYPE *dataset, DATATYPE *original, int mem_selection); +static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, + int file_selection, int mem_selection, int mode); /*------------------------------------------------------------------------- * Function: coll_chunk1 @@ -58,7 +52,7 @@ static void coll_chunktest(const char* filename, int chunk_factor, int select_fa */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: One big singluar selection inside one chunk + * Descriptions for the selection: One big singular selection inside one chunk * Two dimensions, * * dim1 = SPACE_DIM1(5760)*mpi_size @@ -90,7 +84,6 @@ coll_chunk1(void) coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER); } - /*------------------------------------------------------------------------- * Function: coll_chunk2 * @@ -109,7 +102,7 @@ coll_chunk1(void) *------------------------------------------------------------------------- */ - /* ------------------------------------------------------------------------ +/* ------------------------------------------------------------------------ * Descriptions for the selection: many disjoint selections inside one chunk * Two dimensions, * @@ -142,7 +135,6 @@ coll_chunk2(void) coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER); } - /*------------------------------------------------------------------------- * Function: coll_chunk3 * @@ -162,7 +154,7 @@ coll_chunk2(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many 
chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -183,7 +175,7 @@ void coll_chunk3(void) { const char *filename = GetTestParameters(); - int mpi_size; + int mpi_size; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER); @@ -216,7 +208,7 @@ coll_chunk3(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -268,7 +260,7 @@ coll_chunk4(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -322,7 +314,7 @@ coll_chunk5(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -374,7 +366,7 @@ coll_chunk6(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -426,7 +418,7 @@ coll_chunk7(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -478,7 +470,7 @@ coll_chunk8(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -498,17 +490,17 @@ coll_chunk8(void) void coll_chunk9(void) { - const char *filename = GetTestParameters(); + const char *filename = GetTestParameters(); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, 
HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER); } /*------------------------------------------------------------------------- @@ -530,7 +522,7 @@ coll_chunk9(void) */ /* ------------------------------------------------------------------------ - * Descriptions for the selection: one singular selection accross many chunks + * Descriptions for the selection: one singular selection across many chunks * Two dimensions, Num of chunks = 2* mpi_size * * dim1 = SPACE_DIM1*mpi_size @@ -550,28 +542,27 @@ coll_chunk9(void) void coll_chunk10(void) { - const char *filename = GetTestParameters(); + const char *filename = GetTestParameters(); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER); - coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER); + coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER); } - /*------------------------------------------------------------------------- * Function: coll_chunktest * * Purpose: The real testing routine for regular selection of collective chunking storage testing both write and read, - If anything fails, it may be read or write. There is no - separation test between read and write. + If anything fails, it may be read or write. There is no + separation test between read and write. 
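+
+              (The api_option argument selects which collective chunk I/O
+              optimization API is exercised -- H5Pset_dxpl_mpio_chunk_opt,
+              H5Pset_dxpl_mpio_chunk_opt_num, or
+              H5Pset_dxpl_mpio_chunk_opt_ratio; see the switch on
+              api_option below.)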
* * Return: Success: 0 * @@ -592,573 +583,566 @@ coll_chunk10(void) */ static void -coll_chunktest(const char* filename, - int chunk_factor, - int select_factor, - int api_option, - int file_selection, - int mem_selection, - int mode) +coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection, + int mem_selection, int mode) { - hid_t file, dataset, file_dataspace, mem_dataspace; - hid_t acc_plist,xfer_plist,crp_plist; + hid_t file, dataset, file_dataspace, mem_dataspace; + hid_t acc_plist, xfer_plist, crp_plist; - hsize_t dims[RANK], chunk_dims[RANK]; - int* data_array1 = NULL; - int* data_origin1 = NULL; + hsize_t dims[RANK], chunk_dims[RANK]; + int *data_array1 = NULL; + int *data_origin1 = NULL; - hsize_t start[RANK],count[RANK],stride[RANK],block[RANK]; + hsize_t start[RANK], count[RANK], stride[RANK], block[RANK]; #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - unsigned prop_value; + unsigned prop_value; #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ - int mpi_size,mpi_rank; + int mpi_size, mpi_rank; + + herr_t status; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + hsize_t current_dims; /* for point selection */ + + /* set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Create the data space */ + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((acc_plist >= 0), ""); + + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY((status >= 0), ""); + + /* setup dimensionality object */ + dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size); + dims[1] = SPACE_DIM2; + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); + + /* set up the coords array selection */ + num_points = block[0] * block[1] * count[0] * count[1]; + coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); + VRFY((coords != NULL), "coords malloc succeeded"); + point_set(start, count, stride, block, num_points, coords, mode); + + file_dataspace = H5Screate_simple(2, dims, NULL); + VRFY((file_dataspace >= 0), "file dataspace created succeeded"); + + if (ALL != mem_selection) { + mem_dataspace = H5Screate_simple(2, dims, NULL); + VRFY((mem_dataspace >= 0), "mem dataspace created succeeded"); + } + else { + current_dims = num_points; + mem_dataspace = H5Screate_simple(1, ¤t_dims, NULL); + VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + crp_plist = H5Pcreate(H5P_DATASET_CREATE); + VRFY((crp_plist >= 0), ""); + + /* Set up chunk information. */ + chunk_dims[0] = dims[0] / (hsize_t)chunk_factor; + + /* to decrease the testing time, maintain bigger chunk size */ + (chunk_factor == 1) ? 
(chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2 / 2); + status = H5Pset_chunk(crp_plist, 2, chunk_dims); + VRFY((status >= 0), "chunk creation property list succeeded"); + + dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT, + crp_plist, H5P_DEFAULT); + VRFY((dataset >= 0), "dataset created succeeded"); + + status = H5Pclose(crp_plist); + VRFY((status >= 0), ""); + + /*put some trivial data in the data array */ + ccdataset_fill(start, stride, count, block, data_array1, mem_selection); - herr_t status; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - int i; - - /* set up MPI parameters */ - MPI_Comm_size(comm,&mpi_size); - MPI_Comm_rank(comm,&mpi_rank); - - /* Create the data space */ - - acc_plist = create_faccess_plist(comm,info,facc_type); - VRFY((acc_plist >= 0),""); - - file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist); - VRFY((file >= 0),"H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY((status >= 0),""); - - /* setup dimensionality object */ - dims[0] = SPACE_DIM1*mpi_size; - dims[1] = SPACE_DIM2; - - /* allocate memory for data buffer */ - data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); - - /* set up the coords array selection */ - num_points = block[0] * block[1] * count[0] * count[1]; - coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - point_set(start, count, stride, block, num_points, coords, mode); - - file_dataspace = H5Screate_simple(2, dims, NULL); - VRFY((file_dataspace >= 0), "file dataspace created succeeded"); - - if(ALL != mem_selection) { - mem_dataspace = H5Screate_simple(2, dims, NULL); - VRFY((mem_dataspace >= 0), "mem dataspace created succeeded"); - } - else { - current_dims = num_points; - mem_dataspace = H5Screate_simple (1, ¤t_dims, NULL); - VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - crp_plist = H5Pcreate(H5P_DATASET_CREATE); - VRFY((crp_plist >= 0),""); - - /* Set up chunk information. */ - chunk_dims[0] = dims[0]/chunk_factor; - - /* to decrease the testing time, maintain bigger chunk size */ - (chunk_factor == 1) ? 
(chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2); - status = H5Pset_chunk(crp_plist, 2, chunk_dims); - VRFY((status >= 0),"chunk creation property list succeeded"); - - dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, - file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT); - VRFY((dataset >= 0),"dataset created succeeded"); - - status = H5Pclose(crp_plist); - VRFY((status >= 0), ""); - - /*put some trivial data in the data array */ - ccdataset_fill(start, stride, count,block, data_array1, mem_selection); - - MESG("data_array initialized"); - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0),"hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0),"Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY((status >= 0),"none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0),"hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0),"Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY((status >= 0),"none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* set up the collective transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((status>= 0),"MPIO collective transfer property succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status>= 0),"set independent IO collectively succeeded"); - } - - switch(api_option){ - case API_LINK_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO); - VRFY((status>= 0),"collective chunk optimization succeeded"); - break; - - case API_MULTI_HARD: - status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((status>= 0),"collective chunk optimization succeeded "); - break; - - case API_LINK_TRUE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2); - VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - break; - - case API_LINK_FALSE: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6); - VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - break; - - case API_MULTI_COLL: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */ - VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50); - VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded"); - break; - - case API_MULTI_IND: - status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */ - VRFY((status>= 0),"collective chunk optimization set chunk number succeeded"); - status = 
H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100); - VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded"); - break; - - default: - ; - } + MESG("data_array initialized"); + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* set up the collective transfer property list */ + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((status >= 0), "set independent IO collectively succeeded"); + } + + switch (api_option) { + case API_LINK_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO); + VRFY((status >= 0), "collective chunk optimization succeeded"); + break; + + case API_MULTI_HARD: + status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO); + VRFY((status >= 0), "collective chunk optimization succeeded "); + break; + + case API_LINK_TRUE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2); + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_LINK_FALSE: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6); + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + break; + + case API_MULTI_COLL: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50); + VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + case API_MULTI_IND: + status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */ + VRFY((status >= 0), "collective chunk optimization set chunk number succeeded"); + status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100); + VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded"); + break; + + default:; + } #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if(facc_type == FACC_MPIO) { - switch(api_option) { + if (facc_type == FACC_MPIO) { + switch (api_option) { case API_LINK_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; 
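The H5Pinsert2()/H5Pget() pairing in the instrumented-library hunk that follows is a probe: the test registers a temporary property on the transfer property list with a default value, performs the write, and later reads the property back; an instrumented library build clears it on whichever collective-chunk code path actually ran. A minimal sketch of the pattern, assuming a build configured with H5_HAVE_INSTRUMENTED_LIBRARY so the internal H5D_XFER_COLL_CHUNK_* names exist:

    unsigned probe = H5D_XFER_COLL_CHUNK_DEF; /* default value, expected to be cleared */
    /* the six NULLs are the optional set/get/delete/copy/compare/close callbacks */
    H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
               &probe, NULL, NULL, NULL, NULL, NULL, NULL);
    /* ... H5Dwrite(...) using this xfer_plist ... */
    H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &probe);
    /* probe == 0 here means the link-chunk collective path was really taken */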
- status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; case API_MULTI_HARD: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; case API_LINK_TRUE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; case API_LINK_FALSE: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; case API_MULTI_COLL: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, + H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; case API_MULTI_IND: - prop_value = H5D_XFER_COLL_CHUNK_DEF; - status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value, - NULL, NULL, NULL, NULL, NULL, NULL); - VRFY((status >= 0),"testing property list inserted succeeded"); - break; - - default: - ; - } - } + prop_value = H5D_XFER_COLL_CHUNK_DEF; + status = + H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, + &prop_value, NULL, NULL, NULL, NULL, NULL, NULL); + VRFY((status >= 0), "testing property list inserted succeeded"); + break; + + default:; + } + } #endif - /* write data collectively */ - status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((status >= 0),"dataset write succeeded"); + /* write data collectively */ + 
status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((status >= 0), "dataset write succeeded"); #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if(facc_type == FACC_MPIO) { - switch(api_option){ + /* Only check chunk optimization mode if selection I/O is not being used - + * selection I/O bypasses this IO mode decision - it's effectively always + * multi chunk currently */ + if (facc_type == FACC_MPIO && !H5_use_selection_io_g) { + switch (api_option) { case API_LINK_HARD: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded"); - break; + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded"); + break; case API_MULTI_HARD: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); - break; + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded"); + break; case API_LINK_TRUE: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded"); - break; + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded"); + break; case API_LINK_FALSE: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded"); - break; + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded"); + break; case API_MULTI_COLL: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); - break; + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded"); + break; case API_MULTI_IND: - status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value); - VRFY((status >= 0),"testing property list get succeeded"); - VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded"); - break; - - default: - ; - } - } + status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value); + VRFY((status >= 0), "testing property list get succeeded"); + VRFY((prop_value == 0), + "API to set MULTI-CHUNK IO transferring to independent IO succeeded"); + break; + + default:; + } + } 
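Stripped of the instrumentation, the write that both sides of this hunk perform is the stock collective-transfer recipe; a stand-alone sketch with placeholder identifiers (dset, mem_space, file_space, write_buf are assumptions, not names from this file):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); /* every rank must participate */
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, write_buf);
    H5Pclose(dxpl);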
#endif - status = H5Dclose(dataset); - VRFY((status >= 0),""); - - status = H5Pclose(xfer_plist); - VRFY((status >= 0),"property list closed"); - - status = H5Sclose(file_dataspace); - VRFY((status >= 0),""); - - status = H5Sclose(mem_dataspace); - VRFY((status >= 0),""); - - - status = H5Fclose(file); - VRFY((status >= 0),""); - - if (data_array1) HDfree(data_array1); - - /* Use collective read to verify the correctness of collective write. */ - - /* allocate memory for data buffer */ - data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - /* allocate memory for data buffer */ - data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - acc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((acc_plist >= 0),"MPIO creation property list succeeded"); - - file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist); - VRFY((file >= 0),"H5Fcreate succeeded"); - - status = H5Pclose(acc_plist); - VRFY((status >= 0),""); - - /* open the collective dataset*/ - dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); - VRFY((dataset >= 0), ""); - - /* set up dimensions of the slab this process accesses */ - ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); - - /* obtain the file and mem dataspace*/ - file_dataspace = H5Dget_space (dataset); - VRFY((file_dataspace >= 0), ""); - - if (ALL != mem_selection) { - mem_dataspace = H5Dget_space (dataset); - VRFY((mem_dataspace >= 0), ""); - } - else { - current_dims = num_points; - mem_dataspace = H5Screate_simple (1, &current_dims, NULL); - VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); - } - - switch (file_selection) { - case HYPER: - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0),"hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0),"Element selection succeeded"); - } - else { - status = H5Sselect_none(file_dataspace); - VRFY((status >= 0),"none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(file_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - switch (mem_selection) { - case HYPER: - status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((status >= 0),"hyperslab selection succeeded"); - break; - - case POINT: - if (num_points) { - status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((status >= 0),"Element selection succeeded"); - } - else { - status = H5Sselect_none(mem_dataspace); - VRFY((status >= 0),"none selection succeeded"); - } - break; - - case ALL: - status = H5Sselect_all(mem_dataspace); - VRFY((status >= 0), "H5Sselect_all succeeded"); - break; - } - - /* fill dataset with test data */ - ccdataset_fill(start, stride,count,block, data_origin1, mem_selection); - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0),""); - - status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((status>= 0),"MPIO collective transfer property succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status>= 0),"set independent IO collectively succeeded"); - } - - status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, -
xfer_plist, data_array1); - VRFY((status >=0),"dataset read succeeded"); - - /* verify the read data with original expected data */ - status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); - if (status) nerrors++; - - status = H5Pclose(xfer_plist); - VRFY((status >= 0),"property list closed"); - - /* close dataset collectively */ - status=H5Dclose(dataset); - VRFY((status >= 0), "H5Dclose"); - - /* release all IDs created */ - status = H5Sclose(file_dataspace); - VRFY((status >= 0),"H5Sclose"); - - status = H5Sclose(mem_dataspace); - VRFY((status >= 0),"H5Sclose"); - - /* close the file collectively */ - status = H5Fclose(file); - VRFY((status >= 0),"H5Fclose"); - - /* release data buffers */ - if(coords) HDfree(coords); - if(data_array1) HDfree(data_array1); - if(data_origin1) HDfree(data_origin1); + status = H5Dclose(dataset); + VRFY((status >= 0), ""); -} + status = H5Pclose(xfer_plist); + VRFY((status >= 0), "property list closed"); + + status = H5Sclose(file_dataspace); + VRFY((status >= 0), ""); + + status = H5Sclose(mem_dataspace); + VRFY((status >= 0), ""); + status = H5Fclose(file); + VRFY((status >= 0), ""); + + if (data_array1) + HDfree(data_array1); + + /* Use collective read to verify the correctness of collective write. */ + + /* allocate memory for data buffer */ + data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); + + /* allocate memory for data buffer */ + data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int)); + VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); + + acc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((acc_plist >= 0), "MPIO creation property list succeeded"); + + file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + status = H5Pclose(acc_plist); + VRFY((status >= 0), ""); + + /* open the collective dataset*/ + dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT); + VRFY((dataset >= 0), ""); + + /* set up dimensions of the slab this process accesses */ + ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor); + + /* obtain the file and mem dataspace*/ + file_dataspace = H5Dget_space(dataset); + VRFY((file_dataspace >= 0), ""); + + if (ALL != mem_selection) { + mem_dataspace = H5Dget_space(dataset); + VRFY((mem_dataspace >= 0), ""); + } + else { + current_dims = num_points; + mem_dataspace = H5Screate_simple(1, &current_dims, NULL); + VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded"); + } + + switch (file_selection) { + case HYPER: + status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0), "hyperslab selection succeeded"); + break; + + case POINT: + if (num_points) { + status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); + VRFY((status >= 0), "Element selection succeeded"); + } + else { + status = H5Sselect_none(file_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(file_dataspace); + VRFY((status >= 0), "H5Sselect_all succeeded"); + break; + } + + switch (mem_selection) { + case HYPER: + status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((status >= 0),
"Element selection succeeded"); + } + else { + status = H5Sselect_none(mem_dataspace); + VRFY((status >= 0), "none selection succeeded"); + } + break; + + case ALL: + status = H5Sselect_all(mem_dataspace); + VRFY((status >= 0), "H5Sselect_all succeeded"); + break; + } + + /* fill dataset with test data */ + ccdataset_fill(start, stride, count, block, data_origin1, mem_selection); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((xfer_plist >= 0), ""); + + status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((status >= 0), "MPIO collective transfer property succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((status >= 0), "set independent IO collectively succeeded"); + } + + status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); + VRFY((status >= 0), "dataset read succeeded"); + + /* verify the read data with original expected data */ + status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection); + if (status) + nerrors++; + + status = H5Pclose(xfer_plist); + VRFY((status >= 0), "property list closed"); + + /* close dataset collectively */ + status = H5Dclose(dataset); + VRFY((status >= 0), "H5Dclose"); + + /* release all IDs created */ + status = H5Sclose(file_dataspace); + VRFY((status >= 0), "H5Sclose"); + + status = H5Sclose(mem_dataspace); + VRFY((status >= 0), "H5Sclose"); + + /* close the file collectively */ + status = H5Fclose(file); + VRFY((status >= 0), "H5Fclose"); + + /* release data buffers */ + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); + if (data_origin1) + HDfree(data_origin1); +} /* Set up the selection */ static void -ccslab_set(int mpi_rank, - int mpi_size, - hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - int mode) +ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + int mode) { - switch (mode){ - - case BYROW_CONT: - /* Each process takes a slabs of rows. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = SPACE_DIM1; - count[1] = SPACE_DIM2; - start[0] = mpi_rank*count[0]; - start[1] = 0; - - break; - - case BYROW_DISCONT: - /* Each process takes several disjoint blocks. */ - block[0] = 1; - block[1] = 1; - stride[0] = 3; - stride[1] = 3; - count[0] = SPACE_DIM1/(stride[0]*block[0]); - count[1] = (SPACE_DIM2)/(stride[1]*block[1]); - start[0] = SPACE_DIM1*mpi_rank; - start[1] = 0; - - break; - - case BYROW_SELECTNONE: - /* Each process takes a slabs of rows, there are - no selections for the last process. */ - block[0] = 1; - block[1] = 1; - stride[0] = 1; - stride[1] = 1; - count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:SPACE_DIM1); - count[1] = SPACE_DIM2; - start[0] = mpi_rank*count[0]; - start[1] = 0; - - break; - - case BYROW_SELECTUNBALANCE: - /* The first one-third of the number of processes only - select top half of the domain, The rest will select the bottom - half of the domain. 
*/ - - block[0] = 1; - count[0] = 2; - stride[0] = SPACE_DIM1*mpi_size/4+1; - block[1] = SPACE_DIM2; - count[1] = 1; - start[1] = 0; - stride[1] = 1; - if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank; - else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3); - break; - - case BYROW_SELECTINCHUNK: - /* Each process will only select one chunk */ - - block[0] = 1; - count[0] = 1; - start[0] = mpi_rank*SPACE_DIM1; - stride[0]= 1; - block[1] = SPACE_DIM2; - count[1] = 1; - stride[1]= 1; - start[1] = 0; - - break; - - default: - /* Unknown mode. Set it to cover the whole dataset. */ - block[0] = SPACE_DIM1*mpi_size; - block[1] = SPACE_DIM2; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - - break; + switch (mode) { + + case BYROW_CONT: + /* Each process takes a slabs of rows. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = SPACE_DIM1; + count[1] = SPACE_DIM2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_DISCONT: + /* Each process takes several disjoint blocks. */ + block[0] = 1; + block[1] = 1; + stride[0] = 3; + stride[1] = 3; + count[0] = SPACE_DIM1 / (stride[0] * block[0]); + count[1] = (SPACE_DIM2) / (stride[1] * block[1]); + start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank; + start[1] = 0; + + break; + + case BYROW_SELECTNONE: + /* Each process takes a slabs of rows, there are + no selections for the last process. */ + block[0] = 1; + block[1] = 1; + stride[0] = 1; + stride[1] = 1; + count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1); + count[1] = SPACE_DIM2; + start[0] = (hsize_t)mpi_rank * count[0]; + start[1] = 0; + + break; + + case BYROW_SELECTUNBALANCE: + /* The first one-third of the number of processes only + select top half of the domain, The rest will select the bottom + half of the domain. */ + + block[0] = 1; + count[0] = 2; + stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1; + block[1] = SPACE_DIM2; + count[1] = 1; + start[1] = 0; + stride[1] = 1; + if ((mpi_rank * 3) < (mpi_size * 2)) + start[0] = (hsize_t)mpi_rank; + else + start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3)); + break; + + case BYROW_SELECTINCHUNK: + /* Each process will only select one chunk */ + + block[0] = 1; + count[0] = 1; + start[0] = (hsize_t)(mpi_rank * SPACE_DIM1); + stride[0] = 1; + block[1] = SPACE_DIM2; + count[1] = 1; + stride[1] = 1; + start[1] = 0; + + break; + + default: + /* Unknown mode. Set it to cover the whole dataset. 
*/ + block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size; + block[1] = SPACE_DIM2; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + + break; } - if (VERBOSE_MED){ - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0]*block[1]*count[0]*count[1])); + if (VERBOSE_MED) { + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); } } - /* * Fill the dataset with trivial data for testing. * Assume dimension rank is 2. */ static void -ccdataset_fill(hsize_t start[], - hsize_t stride[], - hsize_t count[], - hsize_t block[], - DATATYPE * dataset, +ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset, int mem_selection) { DATATYPE *dataptr = dataset; DATATYPE *tmptr; - hsize_t i,j,k1,k2,k=0; + hsize_t i, j, k1, k2, k = 0; /* put some trivial data in the data_array */ tmptr = dataptr; @@ -1166,23 +1150,23 @@ ccdataset_fill(hsize_t start[], through the pointer */ for (k1 = 0; k1 < count[0]; k1++) { - for(i = 0; i < block[0]; i++) { - for(k2 = 0; k2 < count[1]; k2++) { - for(j = 0;j < block[1]; j++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { - if (ALL != mem_selection) { - dataptr = tmptr + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+ - start[1]+k2*stride[1]+j); - } - else { - dataptr = tmptr + k; - k++; - } + if (ALL != mem_selection) { + dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); + } + else { + dataptr = tmptr + k; + k++; + } - *dataptr = (DATATYPE)(k1+k2+i+j); - } + *dataptr = (DATATYPE)(k1 + k2 + i + j); + } + } } - } } } @@ -1190,83 +1174,75 @@ ccdataset_fill(hsize_t start[], * Print the first block of the content of the dataset. */ static void -ccdataset_print(hsize_t start[], - hsize_t block[], - DATATYPE * dataset) +ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) { DATATYPE *dataptr = dataset; - hsize_t i, j; + hsize_t i, j; /* print the column heading */ - printf("Print only the first block of the dataset\n"); - printf("%-8s", "Cols:"); - for (j=0; j < block[1]; j++){ - printf("%3lu ", (unsigned long)(start[1]+j)); + HDprintf("Print only the first block of the dataset\n"); + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3lu ", (unsigned long)(start[1] + j)); } - printf("\n"); + HDprintf("\n"); /* print the slab data */ - for (i=0; i < block[0]; i++){ - printf("Row %2lu: ", (unsigned long)(i+start[0])); - for (j=0; j < block[1]; j++){ - printf("%03d ", *dataptr++); - } - printf("\n"); + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2lu: ", (unsigned long)(i + start[0])); + for (j = 0; j < block[1]; j++) { + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); } } - /* * Print the content of the dataset. 
*/ static int -ccdataset_vrfy(hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - DATATYPE *dataset, - DATATYPE *original, - int mem_selection) +ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original, int mem_selection) { - hsize_t i, j,k1,k2,k=0; - int vrfyerrs; - DATATYPE *dataptr,*oriptr; + hsize_t i, j, k1, k2, k = 0; + int vrfyerrs; + DATATYPE *dataptr, *oriptr; /* print it if VERBOSE_MED */ if (VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - ccdataset_print(start, block, original); - printf("compared values:\n"); - ccdataset_print(start, block, dataset); + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + ccdataset_print(start, block, original); + HDprintf("compared values:\n"); + ccdataset_print(start, block, dataset); } vrfyerrs = 0; - for (k1=0;k1<count[0];k1++) { - for(i=0;i<block[0];i++) { - for(k2=0; k2<count[1];k2++) { - for(j=0;j<block[1];j++) { + for (k1 = 0; k1 < count[0]; k1++) { + for (i = 0; i < block[0]; i++) { + for (k2 = 0; k2 < count[1]; k2++) { + for (j = 0; j < block[1]; j++) { if (ALL != mem_selection) { - dataptr = dataset + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+ - start[1]+k2*stride[1]+j); - oriptr = original + ((start[0]+k1*stride[0]+i)*SPACE_DIM2+ - start[1]+k2*stride[1]+j); + dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); + oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] + + k2 * stride[1] + j); } else { dataptr = dataset + k; - oriptr = original + k; + oriptr = original + k; k++; } - if (*dataptr != *oriptr){ - if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, - *(oriptr), *(dataptr)); + if (*dataptr != *oriptr) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr)); } } } @@ -1274,8 +1250,8 @@ ccdataset_vrfy(hsize_t start[], } } if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); + HDprintf("[more errors ...]\n"); if (vrfyerrs) - printf("%d errors found in ccdataset_vrfy\n", vrfyerrs); - return(vrfyerrs); + HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs); + return (vrfyerrs); } diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c new file mode 100644 index 0000000..e402428 --- /dev/null +++ b/testpar/t_coll_md_read.c @@ -0,0 +1,526 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * A test suite to test HDF5's collective metadata read capabilities, as enabled + * by making a call to H5Pset_all_coll_metadata_ops(). + */ + +#include "testphdf5.h" + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +/* + * Define the non-participating process as the "last" + * rank to avoid any weirdness potentially caused by + * an if (mpi_rank == 0) check. + */ +#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1) +#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset" +#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2 +#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5 +#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5 + +#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2 + +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000 +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue" +#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1 + +/* + * A test for issue HDFFV-10501. A parallel hang was reported which occurred + * in linked-chunk I/O when collective metadata reads are enabled and some ranks + * do not have any selection in a dataset's dataspace, while others do. The ranks + * which have no selection during the read/write operation called H5D__chunk_addrmap() + * to retrieve the lowest chunk address, since we require that the read/write be done + * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees, + * this caused the non-participating ranks to issue a collective MPI_Bcast() call + * which the other ranks did not issue, thus causing a hang. + * + * However, since these ranks are not actually reading/writing anything, this call + * can simply be removed and the address used for the read/write can be set to an + * arbitrary number (0 was chosen). 
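The deadlock mechanism described above is the general MPI rule that a collective operation must be entered by every rank of the communicator; if only the non-selecting ranks issue an extra MPI_Bcast(), no other rank ever matches it. A contrived sketch of the failure shape (illustrative only, not HDF5 code; lowest_addr is a placeholder):

    int lowest_addr = 0;
    if (mpi_rank == mpi_size - 1) /* only the rank with no selection enters */
        MPI_Bcast(&lowest_addr, 1, MPI_INT, 0, MPI_COMM_WORLD); /* never matched: hang */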
+ */ +void +test_partial_no_selection_coll_md_read(void) +{ + const char *filename; + hsize_t *dataset_dims = NULL; + hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t sel_dims[1]; + hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE, + PARTIAL_NO_SELECTION_X_DIM_SCALE}; + hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + void *data = NULL; + void *read_buf = NULL; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. + */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims)); + VRFY((dataset_dims != NULL), "malloc succeeded"); + + dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size; + dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size; + max_dataset_dims[0] = H5S_UNLIMITED; + max_dataset_dims[1] = H5S_UNLIMITED; + + fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Set up chunking on the dataset in order to reproduce the problem. + */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* + * Setup hyperslab selection to split the dataset among the ranks. + * + * The ranks will write rows across the dataset. 
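One bookkeeping detail in the selection code below: the 1-D memory dataspace must hold exactly as many elements as the file-side hyperslab selects, which for a hyperslab is the product of count and block in each dimension. A generic sketch of that invariant (mem_space is a placeholder name):

    /* total elements picked by a 2-D hyperslab selection */
    hsize_t nelems = count[0] * block[0] * count[1] * block[1];
    hid_t mem_space = H5Screate_simple(1, &nelems, NULL); /* flat memory buffer */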
+ */ + start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank; + start[1] = 0; + stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; + stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; + count[0] = 1; + count[1] = (hsize_t)mpi_size; + block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE; + block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE); + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int)); + VRFY((data != NULL), "calloc succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + /* + * Enable collective access for the data transfer. + */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + /* + * Ensure that linked-chunk I/O is performed since this is + * the particular code path where the issue lies and we don't + * want the library doing multi-chunk I/O behind our backs. + */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + /* + * Make sure to call H5Sselect_none() on the non-participating process. + */ + if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { + VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded"); + VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded"); + } + + /* + * Finally have each rank read their section of data back from the dataset. + */ + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), + "H5Dread succeeded"); + + /* + * Check data integrity just to be sure. + */ + if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) { + VRFY((!HDmemcmp(data, read_buf, + count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * + sizeof(int))), + "memcmp succeeded"); + } + + if (dataset_dims) { + HDfree(dataset_dims); + dataset_dims = NULL; + } + + if (data) { + HDfree(data); + data = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} + +/* + * A test for HDFFV-10562 which attempts to verify that using multi-chunk + * I/O with collective metadata reads enabled doesn't causes issues due to + * collective metadata reads being made only by process 0 in H5D__chunk_addrmap(). 
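For reference, collective metadata reads are an opt-in file-access property and must be set identically on every rank before the file is opened or created; a minimal sketch with placeholder names (fapl, fid, "example.h5" are assumptions):

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_all_coll_metadata_ops(fapl, 1); /* metadata reads become collective */
    hid_t fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);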
+ * + * Failure in this test may either cause a hang, or, due to how the MPI calls + * pertaining to this issue might mistakenly match up, may cause an MPI error + * message similar to: + * + * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed + * major: Internal error (too specific to document in detail) + * minor: Some MPI function failed + * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack: + *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) + *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................: + *MPIR_Bcast_intra(1249)............: + *MPIR_SMP_Bcast(1088)..............: + *MPIR_Bcast_binomial(239)..........: + *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer + *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String + * + */ +void +test_multi_chunk_io_addrmap_issue(void) +{ + const char *filename; + hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS]; + hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5}; + hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5}; + hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED}; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t space_id = H5I_INVALID_HID; + void *read_buf = NULL; + int mpi_rank; + int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}}; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. 
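Rather than trusting the library's heuristic, the test pins the chunked-I/O mode explicitly. Both forcing modes are public H5FD_MPIO enums; a sketch (dxpl_id stands for any dataset transfer property list):

    H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO); /* chunk-by-chunk collective I/O */
    /* or H5FD_MPIO_CHUNK_ONE_IO to force one linked I/O over all chunks */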
+ */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims); + VRFY((space_id >= 0), "H5Screate_simple succeeded"); + + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + start[1] = 0; + stride[0] = stride[1] = 1; + count[0] = count[1] = 5; + block[0] = block[1] = 1; + + if (mpi_rank == 0) + start[0] = 0; + else + start[0] = 5; + + VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + if (mpi_rank != 0) + VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + read_buf = HDmalloc(50 * sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded"); + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} + +/* + * A test for HDFFV-10562 which attempts to verify that using linked-chunk + * I/O with collective metadata reads enabled doesn't cause issues due to + * collective metadata reads being made only by process 0 in H5D__sort_chunk(). + * + * NOTE: Due to the way that the threshold value which pertains to this test + * is currently calculated within HDF5, the following two conditions must be + * true to trigger the issue: + * + * Condition 1: A certain threshold ratio must be met in order to have HDF5 + * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is + * given by the following: + * + * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30% + * + * where: + * * `sum_chunk` is the combined sum of the number of chunks selected in + * the dataset by all ranks (chunks selected by more than one rank count + * individually toward the sum for each rank selecting that chunk) + * * `dataset_nchunks` is the number of chunks in the dataset (selected + * or not) + * * `mpi_size` is the size of the MPI Communicator + * + * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain + * threshold (as of this writing, 10000). 
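Plugging this test's setup into those two conditions (every rank selects a piece of every chunk, and a chunk counts once toward sum_chunk per rank selecting it) gives:

    dataset_nchunks = 10000 (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM)
    sum_chunk       = mpi_size * 10000

    condition 1: (sum_chunk * 100) / (dataset_nchunks * mpi_size) = 100 >= 30
    condition 2:  sum_chunk / mpi_size = 10000, exactly the numeric threshold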
+ * + * To satisfy both these conditions, we #define a macro, + * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the + * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the + * 10000 threshold from condition 2). We then create a dataset of that many + * chunks and have each MPI rank write to and read from a piece of every single + * chunk in the dataset. This ensures chunk utilization is the max possible + * and exceeds our 30% target ratio, while always exactly matching the numeric + * chunk threshold value of condition 2. + * + * Failure in this test may either cause a hang, or, due to how the MPI calls + * pertaining to this issue might mistakenly match up, may cause an MPI error + * message similar to: + * + * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed + * major: Internal error (too specific to document in detail) + * minor: Some MPI function failed + * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack: + *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed + *MPIR_Bcast_impl(1452)...: + *MPIR_Bcast(1476)........: + *MPIR_Bcast_intra(1249)..: + *MPIR_SMP_Bcast(1088)....: + *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received + *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String + */ +void +test_link_chunk_io_sort_chunk_issue(void) +{ + const char *filename; + hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS]; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + hid_t mspace_id = H5I_INVALID_HID; + int mpi_rank, mpi_size; + void *data = NULL; + void *read_buf = NULL; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = GetTestParameters(); + + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + + /* + * Even though the testphdf5 framework currently sets collective metadata reads + * on the FAPL, we call it here just to be sure this is futureproof, since + * demonstrating this issue relies upon it. + */ + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded"); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* + * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM + * chunks, where every rank writes to a piece of every single chunk to keep utilization high. + */ + dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; + + fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Set up chunking on the dataset in order to reproduce the problem. 
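To see why each rank lands in every chunk below: with chunk_dims[0] equal to mpi_size, chunk i covers elements [i*mpi_size, (i+1)*mpi_size), so rank r's strided selection (start r, stride mpi_size, block 1) takes element i*mpi_size + r from chunk i. A sketch of the check, reusing the test's own variables:

    for (hsize_t i = 0; i < count[0]; i++) {
        hsize_t elem = start[0] + i * stride[0]; /* == r + i * mpi_size */
        /* elem / chunk_dims[0] == i: exactly one element in every chunk */
    }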
+ */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + + /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */ + chunk_dims[0] = (hsize_t)mpi_size; + + VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), + "H5Pset_chunk succeeded"); + + dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, + H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* + * Setup hyperslab selection to split the dataset among the ranks. + */ + start[0] = (hsize_t)mpi_rank; + stride[0] = (hsize_t)mpi_size; + count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM; + block[0] = 1; + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[0]; + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + data = HDcalloc(1, count[0] * sizeof(int)); + VRFY((data != NULL), "calloc succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "H5Pcreate succeeded"); + + /* + * Enable collective access for the data transfer. + */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded"); + + VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded"); + + /* + * Ensure that linked-chunk I/O is performed since this is + * the particular code path where the issue lies and we don't + * want the library doing multi-chunk I/O behind our backs. + */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + read_buf = HDmalloc(count[0] * sizeof(int)); + VRFY((read_buf != NULL), "malloc succeeded"); + + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), + "H5Sselect_hyperslab succeeded"); + + sel_dims[0] = count[0]; + + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + + mspace_id = H5Screate_simple(1, sel_dims, NULL); + VRFY((mspace_id >= 0), "H5Screate_simple succeeded"); + + /* + * Finally have each rank read their section of data back from the dataset. + */ + VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), + "H5Dread succeeded"); + + if (data) { + HDfree(data); + data = NULL; + } + + if (read_buf) { + HDfree(read_buf); + read_buf = NULL; + } + + VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded"); + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); +} diff --git a/testpar/t_dset.c b/testpar/t_dset.c index ae022fb..5002fb8 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. 
COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -38,130 +35,135 @@ * Setup the dimensions of the hyperslab. * Two modes--by rows or by columns. * Assume dimension rank is 2. - * BYROW divide into slabs of rows - * BYCOL divide into blocks of columns - * ZROW same as BYROW except process 0 gets 0 rows - * ZCOL same as BYCOL except process 0 gets 0 columns + * BYROW divide into slabs of rows + * BYCOL divide into blocks of columns + * ZROW same as BYROW except process 0 gets 0 rows + * ZCOL same as BYCOL except process 0 gets 0 columns */ static void -slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], - hsize_t stride[], hsize_t block[], int mode) +slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], + int mode) { - switch (mode){ - case BYROW: - /* Each process takes a slabs of rows. */ - block[0] = dim0/mpi_size; - block[1] = dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = mpi_rank*block[0]; - start[1] = 0; -if(VERBOSE_MED) printf("slab_set BYROW\n"); - break; - case BYCOL: - /* Each process takes a block of columns. */ - block[0] = dim0; - block[1] = dim1/mpi_size; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = mpi_rank*block[1]; -if(VERBOSE_MED) printf("slab_set BYCOL\n"); - break; - case ZROW: - /* Similar to BYROW except process 0 gets 0 row */ - block[0] = (mpi_rank ? dim0/mpi_size : 0); - block[1] = dim1; - stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */ - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (mpi_rank? mpi_rank*block[0] : 0); - start[1] = 0; -if(VERBOSE_MED) printf("slab_set ZROW\n"); - break; - case ZCOL: - /* Similar to BYCOL except process 0 gets 0 column */ - block[0] = dim0; - block[1] = (mpi_rank ? dim1/mpi_size : 0); - stride[0] = block[0]; - stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */ - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (mpi_rank? mpi_rank*block[1] : 0); -if(VERBOSE_MED) printf("slab_set ZCOL\n"); - break; - default: - /* Unknown mode. Set it to cover the whole dataset. */ - printf("unknown slab_set mode (%d)\n", mode); - block[0] = dim0; - block[1] = dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; -if(VERBOSE_MED) printf("slab_set wholeset\n"); - break; + switch (mode) { + case BYROW: + /* Each process takes a slabs of rows. 
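Concretely, with illustrative values dim0 = 8, dim1 = 8 and mpi_size = 4 (not the test's actual sizes), the two main decompositions work out to:

    /* BYROW: rank r gets block = (2, 8) at start = (2*r, 0) -- horizontal slabs */
    /* BYCOL: rank r gets block = (8, 2) at start = (0, 2*r) -- vertical slabs   */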
*/ + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)dim1; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set BYROW\n"); + break; + case BYCOL: + /* Each process takes a block of columns. */ + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)(dim1 / mpi_size); + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; + if (VERBOSE_MED) + HDprintf("slab_set BYCOL\n"); + break; + case ZROW: + /* Similar to BYROW except process 0 gets 0 row */ + block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0); + block[1] = (hsize_t)dim1; + stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */ + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0); + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set ZROW\n"); + break; + case ZCOL: + /* Similar to BYCOL except process 0 gets 0 column */ + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0); + stride[0] = block[0]; + stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */ + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0); + if (VERBOSE_MED) + HDprintf("slab_set ZCOL\n"); + break; + default: + /* Unknown mode. Set it to cover the whole dataset. */ + HDprintf("unknown slab_set mode (%d)\n", mode); + block[0] = (hsize_t)dim0; + block[1] = (hsize_t)dim1; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = 0; + start[1] = 0; + if (VERBOSE_MED) + HDprintf("slab_set wholeset\n"); + break; } -if(VERBOSE_MED){ - printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0]*block[1]*count[0]*count[1])); + if (VERBOSE_MED) { + HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); } } /* * Setup the coordinates for point selection. 
*/ -void point_set(hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - size_t num_points, - hsize_t coords[], - int order) +void +point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points, + hsize_t coords[], int order) { - hsize_t i,j, k = 0, m ,n, s1 ,s2; + hsize_t i, j, k = 0, m, n, s1, s2; HDcompile_assert(RANK == 2); - if(OUT_OF_ORDER == order) + if (OUT_OF_ORDER == order) k = (num_points * RANK) - 1; - else if(IN_ORDER == order) + else if (IN_ORDER == order) k = 0; s1 = start[0]; s2 = start[1]; - for(i = 0 ; i < count[0]; i++) - for(j = 0 ; j < count[1]; j++) - for(m = 0 ; m < block[0]; m++) - for(n = 0 ; n < block[1]; n++) - if(OUT_OF_ORDER == order) { + for (i = 0; i < count[0]; i++) + for (j = 0; j < count[1]; j++) + for (m = 0; m < block[0]; m++) + for (n = 0; n < block[1]; n++) + if (OUT_OF_ORDER == order) { coords[k--] = s2 + (stride[1] * j) + n; coords[k--] = s1 + (stride[0] * i) + m; } - else if(IN_ORDER == order) { + else if (IN_ORDER == order) { coords[k++] = s1 + stride[0] * i + m; coords[k++] = s2 + stride[1] * j + n; } - if(VERBOSE_MED) { - printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); + if (VERBOSE_MED) { + HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total " + "datapoints=%lu\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1], + (unsigned long)(block[0] * block[1] * count[0] * count[1])); k = 0; - for(i = 0; i < num_points ; i++) { - printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); + for (i = 0; i < num_points; i++) { + HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); k += 2; } } @@ -172,92 +174,90 @@ void point_set(hsize_t start[], * Assume dimension rank is 2 and data is stored contiguous. */ static void -dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset) +dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset) { DATATYPE *dataptr = dataset; - hsize_t i, j; + hsize_t i, j; /* put some trivial data in the data_array */ - for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1)); - dataptr++; - } + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1)); + dataptr++; + } } } - /* * Print the content of the dataset. 
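 *
 * (A note on the values being printed: dataset_fill() above encodes each
 * element's global position into its value, value = row * 100 + column + 1,
 * so global element (2, 3) holds 204. That makes the dump below readable at
 * a glance and lets dataset_vrfy() recompute the expected buffer
 * independently on every rank.)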
*/ static void -dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset) +dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset) { DATATYPE *dataptr = dataset; - hsize_t i, j; + hsize_t i, j; /* print the column heading */ - printf("%-8s", "Cols:"); - for (j=0; j < block[1]; j++){ - printf("%3lu ", (unsigned long)(start[1]+j)); + HDprintf("%-8s", "Cols:"); + for (j = 0; j < block[1]; j++) { + HDprintf("%3lu ", (unsigned long)(start[1] + j)); } - printf("\n"); + HDprintf("\n"); /* print the slab data */ - for (i=0; i < block[0]; i++){ - printf("Row %2lu: ", (unsigned long)(i+start[0])); - for (j=0; j < block[1]; j++){ - printf("%03d ", *dataptr++); - } - printf("\n"); + for (i = 0; i < block[0]; i++) { + HDprintf("Row %2lu: ", (unsigned long)(i + start[0])); + for (j = 0; j < block[1]; j++) { + HDprintf("%03d ", *dataptr++); + } + HDprintf("\n"); } } - /* * Print the content of the dataset. */ int -dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original) +dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, + DATATYPE *original) { hsize_t i, j; - int vrfyerrs; + int vrfyerrs; /* print it if VERBOSE_MED */ - if(VERBOSE_MED) { - printf("dataset_vrfy dumping:::\n"); - printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - printf("original values:\n"); - dataset_print(start, block, original); - printf("compared values:\n"); - dataset_print(start, block, dataset); + if (VERBOSE_MED) { + HDprintf("dataset_vrfy dumping:::\n"); + HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", + (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], + (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1], + (unsigned long)block[0], (unsigned long)block[1]); + HDprintf("original values:\n"); + dataset_print(start, block, original); + HDprintf("compared values:\n"); + dataset_print(start, block, dataset); } vrfyerrs = 0; - for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - if(*dataset != *original){ - if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, - (unsigned long)(i+start[0]), (unsigned long)(j+start[1]), - *(original), *(dataset)); - } - dataset++; - original++; - } - } + for (i = 0; i < block[0]; i++) { + for (j = 0; j < block[1]; j++) { + if (*dataset != *original) { + if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) { + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]), + (unsigned long)(j + start[1]), *(original), *(dataset)); + } + dataset++; + original++; + } + } } - if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if(vrfyerrs) - printf("%d errors found in dataset_vrfy\n", vrfyerrs); - return(vrfyerrs); + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (vrfyerrs) + HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs); + return (vrfyerrs); } - /* * Part 1.a--Independent read/write for fixed dimension datasets. 
*/ @@ -273,36 +273,36 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[] void dataset_writeInd(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ const char *filename; - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Independent write test on file %s\n", filename); + if (VERBOSE_MED) + HDprintf("Independent write test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); /* ---------------------------------------- @@ -320,29 +320,24 @@ dataset_writeInd(void) ret = H5Pclose(acc_tpl); VRFY((ret >= 0), ""); - /* --------------------------------------------- * Define the dimensions of the overall datasets * and the slabs local to the MPI process. * ------------------------------------------- */ /* setup dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (RANK, dims, NULL); + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); - /* create a dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); /* create another dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - /* * To test the independent orders of writes between processes, all * even number processes write to dataset1 first, then dataset2. 
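
The pattern this test exercises, distilled: with the default transfer property
list (H5P_DEFAULT) each rank performs its own MPI-IO accesses to the raw data,
with no coordination between ranks. A minimal self-contained sketch of the same
flow, assuming hypothetical names, a 64x32 global extent that divides evenly
among the ranks, and with the VRFY-style error checking omitted for brevity:

    #include "hdf5.h"
    #include <mpi.h>

    static int buf[64 * 32]; /* zero-initialized payload, >= one rank's block */

    static void
    write_rows_independently(const char *filename, int mpi_rank, int mpi_size)
    {
        hsize_t dims[2]  = {64, 32};
        hsize_t block[2] = {dims[0] / (hsize_t)mpi_size, dims[1]};
        hsize_t start[2] = {(hsize_t)mpi_rank * block[0], 0};
        hsize_t count[2] = {1, 1};

        /* route file I/O through the MPI-IO virtual file driver */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

        hid_t fid  = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        hid_t sid  = H5Screate_simple(2, dims, NULL);
        hid_t dset = H5Dcreate2(fid, "Data1", H5T_NATIVE_INT, sid,
                                H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* each rank selects its own slab of rows in the file... */
        hid_t fspace = H5Dget_space(dset);
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, block, count, block);

        /* ...and a matching contiguous region in memory */
        hid_t mspace = H5Screate_simple(2, block, NULL);

        /* H5P_DEFAULT dxpl == independent I/O; no rank waits on the others */
        H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

        H5Sclose(mspace);
        H5Sclose(fspace);
        H5Sclose(sid);
        H5Dclose(dset);
        H5Pclose(fapl);
        H5Fclose(fid);
    }

Note that H5Fcreate() and H5Dcreate2() are still collective calls on the file's
communicator even in this mode; only the raw-data transfers are uncoordinated,
which is why the test can stagger the order of writes to dataset1 and dataset2
per rank.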
@@ -357,43 +352,40 @@ dataset_writeInd(void) MESG("data_array initialized"); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); /* setup dimensions again to write with zero rows for process 0 */ - if(VERBOSE_MED) - printf("writeInd by some with zero row\n"); + if (VERBOSE_MED) + HDprintf("writeInd by some with zero row\n"); slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); } MESG("writeInd by some with zero row"); -if((mpi_rank/2)*2 != mpi_rank){ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); -} + if ((mpi_rank / 2) * 2 != mpi_rank) { + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); + } #ifdef BARRIER_CHECKS -MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(MPI_COMM_WORLD); #endif /* BARRIER_CHECKS */ /* release dataspace ID */ @@ -412,44 +404,45 @@ MPI_Barrier(MPI_COMM_WORLD); H5Fclose(fid); /* release data buffers */ - if(data_array1) HDfree(data_array1); + if (data_array1) + HDfree(data_array1); } /* Example of using the parallel HDF5 library to read a dataset */ void dataset_readInd(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ const char *filename; - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t 
count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Independent read test on file %s\n", filename); + if (VERBOSE_MED) + HDprintf("Independent read test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); /* setup file access template */ @@ -457,7 +450,7 @@ dataset_readInd(void) VRFY((acc_tpl >= 0), ""); /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); VRFY((fid >= 0), ""); /* Release file-access template */ @@ -472,40 +465,39 @@ dataset_readInd(void) dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); VRFY((dataset2 >= 0), ""); - /* set up dimensions of the slab this process accesses */ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), ""); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), ""); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* fill dataset with test data */ dataset_fill(start, block, data_origin1); /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); VRFY((ret >= 0), ""); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); VRFY((ret >= 0), ""); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* close dataset collectively */ ret = H5Dclose(dataset1); @@ -520,11 +512,12 @@ dataset_readInd(void) H5Fclose(fid); /* release data buffers */ - if(data_array1) HDfree(data_array1); - if(data_origin1) HDfree(data_origin1); + if (data_array1) + HDfree(data_array1); + if 
(data_origin1) + HDfree(data_origin1); } - /* * Part 1.b--Collective read/write for fixed dimension datasets. */ @@ -541,49 +534,48 @@ dataset_readInd(void) void dataset_writeAll(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ - hid_t dataset5, dataset6, dataset7; /* Dataset ID */ - hid_t datatype; /* Datatype ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t sid; /* Dataspace ID */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ + hid_t dataset5, dataset6, dataset7; /* Dataset ID */ + hid_t datatype; /* Datatype ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ const char *filename; - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - int i; + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + hsize_t current_dims; /* for point selection */ - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Collective write test on file %s\n", filename); + if (VERBOSE_MED) + HDprintf("Collective write test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* set up the coords array selection */ - num_points = dim1; - coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t)); + num_points = (size_t)dim1; + coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t)); VRFY((coords != NULL), "coords malloc succeeded"); /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); /* ------------------- @@ -601,25 +593,23 @@ dataset_writeAll(void) ret = H5Pclose(acc_tpl); VRFY((ret >= 0), ""); - /* -------------------------- * Define the dimensions of the overall datasets * and create the dataset * ------------------------- */ /* setup 2-D dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (RANK, dims, NULL); + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); - /* create a dataset 
collectively */ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); /* create another dataset collectively */ datatype = H5Tcopy(H5T_NATIVE_INT); - ret = H5Tset_order(datatype, H5T_ORDER_LE); + ret = H5Tset_order(datatype, H5T_ORDER_LE); VRFY((ret >= 0), "H5Tset_order succeeded"); dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -658,54 +648,51 @@ dataset_writeAll(void) slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* fill the local slab with some trivial data */ dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* write data collectively */ MESG("writeAll by Row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); /* setup dimensions again to writeAll with zero rows for process 0 */ - if(VERBOSE_MED) - printf("writeAll by some with zero row\n"); + if (VERBOSE_MED) + HDprintf("writeAll by some with zero row\n"); slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); } MESG("writeAll by some with zero row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); /* release all temporary handles. 
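     *
     * (A detail worth noting in the ZROW pass above: because the transfer
     * property list is collective, every rank -- including process 0, whose
     * hyperslab selection is empty -- still makes the H5Dwrite() call.
     * Collective MPI-IO operations are matched across the whole
     * communicator, so a rank with nothing to write participates with a
     * zero-row selection rather than skipping the call.)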
*/ @@ -721,59 +708,56 @@ dataset_writeAll(void) /* put some trivial data in the data_array */ dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* fill the local slab with some trivial data */ dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); /* setup dimensions again to writeAll with zero columns for process 0 */ - if(VERBOSE_MED) - printf("writeAll by some with zero col\n"); + if (VERBOSE_MED) + HDprintf("writeAll by some with zero col\n"); slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); } MESG("writeAll by some with zero col"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded"); /* release all temporary handles. */ @@ -783,16 +767,15 @@ dataset_writeAll(void) H5Sclose(mem_dataspace); H5Pclose(xfer_plist); - /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. 
*/ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset3); + file_dataspace = H5Dget_space(dataset3); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if(MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded"); } /* end if */ else { ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); @@ -800,42 +783,39 @@ dataset_writeAll(void) } /* end else */ /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); - if(MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded"); } /* end if */ /* fill the local slab with some trivial data */ dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* end if */ /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* write data collectively */ MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); /* write data collectively (with datatype conversion) */ MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); /* release all temporary handles. 
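     *
     * (The "none" pattern used for dataset3 generalizes: a rank with
     * nothing to transfer keeps the collective call matched by selecting
     * nothing on both sides of the transfer, e.g. with hypothetical handles
     *
     *     if (mpi_rank == 0) {
     *         H5Sselect_none(file_space);
     *         H5Sselect_none(mem_space);
     *     }
     *
     * so the two selections still agree on their element count -- zero and
     * zero -- which is the invariant H5Dwrite() enforces between the memory
     * and file dataspaces.)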
*/ @@ -849,11 +829,11 @@ dataset_writeAll(void) /* Additionally, these are in a scalar dataspace */ /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset4); + file_dataspace = H5Dget_space(dataset4); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if(MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(file_dataspace); + VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded"); } /* end if */ else { ret = H5Sselect_all(file_dataspace); @@ -863,9 +843,9 @@ dataset_writeAll(void) /* create a memory dataspace independently */ mem_dataspace = H5Screate(H5S_SCALAR); VRFY((mem_dataspace >= 0), ""); - if(MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_none(mem_dataspace); + VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded"); } /* end if */ else { ret = H5Sselect_all(mem_dataspace); @@ -875,31 +855,29 @@ dataset_writeAll(void) /* fill the local slab with some trivial data */ dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* end if */ /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } /* write data collectively */ MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); /* write data collectively (with datatype conversion) */ MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); /* release all temporary handles. 
*/ @@ -907,55 +885,54 @@ dataset_writeAll(void) H5Sclose(mem_dataspace); H5Pclose(xfer_plist); - - if(data_array1) free(data_array1); - data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE)); + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - block[0] = 1; - block[1] = dim1; + block[0] = 1; + block[1] = (hsize_t)dim1; stride[0] = 1; - stride[1] = dim1; - count[0] = 1; - count[1] = 1; - start[0] = dim0/mpi_size * mpi_rank; - start[1] = 0; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; dataset_fill(start, block, data_array1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_array1); } /* Dataset5: point selection in File - Hyperslab selection in Memory*/ /* create a file dataspace independently */ - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space (dataset5); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + file_dataspace = H5Dget_space(dataset5); + VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); - start[0] = 0; - start[1] = 0; - mem_dataspace = H5Dget_space (dataset5); + start[0] = 0; + start[1] = 0; + mem_dataspace = H5Dget_space(dataset5); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } /* write data collectively */ - ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset5 succeeded"); /* release all temporary handles. 
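     *
     * (For the H5Sselect_elements() call used with dataset5, coords is one
     * flat array of num_points * RANK values, a full coordinate per point;
     * for three points in this rank-2 space that layout is
     *
     *     hsize_t coords[3 * 2] = {r0, c0, r1, c1, r2, c2};
     *     H5Sselect_elements(space, H5S_SELECT_SET, 3, coords);
     *
     * point_set() fills the array front-to-back for IN_ORDER and
     * back-to-front for OUT_OF_ORDER so that both traversal orders get
     * exercised.)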
*/ @@ -965,35 +942,34 @@ dataset_writeAll(void) /* Dataset6: point selection in File - Point selection in Memory*/ /* create a file dataspace independently */ - start[0] = dim0/mpi_size * mpi_rank; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); start[1] = 0; - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space (dataset6); + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); start[0] = 0; start[1] = 0; - point_set (start, count, stride, block, num_points, coords, IN_ORDER); - mem_dataspace = H5Dget_space (dataset6); + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } /* write data collectively */ - ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dwrite dataset6 succeeded"); /* release all temporary handles. 
 */
@@ -1003,34 +979,33 @@ dataset_writeAll(void)
 
     /* Dataset7: point selection in File - All selection in Memory*/
     /* create a file dataspace independently */
-    start[0] = dim0/mpi_size * mpi_rank;
+    start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
     start[1] = 0;
-    point_set (start, count, stride, block, num_points, coords, IN_ORDER);
-    file_dataspace = H5Dget_space (dataset7);
-    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+    file_dataspace = H5Dget_space(dataset7);
+    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
     ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
     VRFY((ret >= 0), "H5Sselect_elements succeeded");
 
-    current_dims = num_points;
-    mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+    current_dims  = num_points;
+    mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
     VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
     ret = H5Sselect_all(mem_dataspace);
     VRFY((ret >= 0), "H5Sselect_all succeeded");
 
     /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
     VRFY((xfer_plist >= 0), "");
     ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
     }
 
     /* write data collectively */
-    ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-            xfer_plist, data_array1);
+    ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
     VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
 
     /* release all temporary handles.
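     *
     * (dataset7 also demonstrates that the memory and file selections need
     * to agree only on the number of selected elements, not on rank or
     * shape: num_points elements picked out of the 2-D file space by the
     * point selection are paired with a 1-D memory space of extent
     * num_points under H5Sselect_all(), and elements are matched in the
     * order each selection is iterated.)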
*/ @@ -1060,8 +1035,10 @@ dataset_writeAll(void) H5Fclose(fid); /* release data buffers */ - if(coords) HDfree(coords); - if(data_array1) HDfree(data_array1); + if (coords) + HDfree(coords); + if (data_array1) + HDfree(data_array1); } /* @@ -1076,48 +1053,47 @@ dataset_writeAll(void) void dataset_readAll(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist; /* Dataset transfer properties list */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ const char *filename; - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - int i,j,k; + size_t num_points; /* for point selection */ + hsize_t *coords = NULL; /* for point selection */ + int i, j, k; - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Collective read test on file %s\n", filename); + if (VERBOSE_MED) + HDprintf("Collective read test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* set up the coords array selection */ - num_points = dim1; - coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t)); + num_points = (size_t)dim1; + coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t)); VRFY((coords != NULL), "coords malloc succeeded"); /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); /* ------------------- @@ -1128,14 +1104,13 @@ dataset_readAll(void) VRFY((acc_tpl >= 0), ""); /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); VRFY((fid >= 0), "H5Fopen succeeded"); /* Release file-access template */ ret = H5Pclose(acc_tpl); VRFY((ret >= 0), ""); - /* -------------------------- * Open the datasets in it * 
------------------------- */ @@ -1163,62 +1138,61 @@ dataset_readAll(void) slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* fill dataset with test data */ dataset_fill(start, block, data_origin1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); } /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset1 succeeded"); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* setup dimensions again to readAll with zero columns for process 0 */ - if(VERBOSE_MED) - printf("readAll by some with zero col\n"); + if (VERBOSE_MED) + HDprintf("readAll by some with zero col\n"); slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); } MESG("readAll by some with zero col"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded"); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* release all temporary handles. 
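     *
     * (Reading dataset1 BYCOL here, although it was written BYROW, is
     * deliberate: dataset_fill() derives every value from its global
     * coordinates alone, so the readers can verify the data under a
     * completely different decomposition than the writers used.)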
*/ /* Could have used them for dataset2 but it is cleaner */ @@ -1231,219 +1205,221 @@ dataset_readAll(void) slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* fill dataset with test data */ dataset_fill(start, block, data_origin1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); } /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset2 succeeded"); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* setup dimensions again to readAll with zero rows for process 0 */ - if(VERBOSE_MED) - printf("readAll by some with zero row\n"); + if (VERBOSE_MED) + HDprintf("readAll by some with zero row\n"); slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); + if (MAINPROCESS) { + ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); } MESG("readAll by some with zero row"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded"); /* verify the read data with original expected data */ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* release all temporary handles. 
*/ H5Sclose(file_dataspace); H5Sclose(mem_dataspace); H5Pclose(xfer_plist); - if(data_array1) free(data_array1); - if(data_origin1) free(data_origin1); - data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE)); + if (data_array1) + free(data_array1); + if (data_origin1) + free(data_origin1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_origin1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE)); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - block[0] = 1; - block[1] = dim1; + block[0] = 1; + block[1] = (hsize_t)dim1; stride[0] = 1; - stride[1] = dim1; - count[0] = 1; - count[1] = 1; - start[0] = dim0/mpi_size * mpi_rank; - start[1] = 0; + stride[1] = (hsize_t)dim1; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); + start[1] = 0; dataset_fill(start, block, data_origin1); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(start, block, data_origin1); } /* Dataset5: point selection in memory - Hyperslab selection in file*/ /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset5); + file_dataspace = H5Dget_space(dataset5); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); start[0] = 0; start[1] = 0; - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - mem_dataspace = H5Dget_space (dataset5); + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset5); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } /* read data collectively */ - ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset5 succeeded"); - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* release all temporary handles. 
*/ H5Sclose(file_dataspace); H5Sclose(mem_dataspace); H5Pclose(xfer_plist); - - if(data_array1) free(data_array1); - data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE)); + if (data_array1) + free(data_array1); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); /* Dataset6: point selection in File - Point selection in Memory*/ /* create a file dataspace independently */ - start[0] = dim0/mpi_size * mpi_rank; + start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank); start[1] = 0; - point_set (start, count, stride, block, num_points, coords, IN_ORDER); - file_dataspace = H5Dget_space (dataset6); + point_set(start, count, stride, block, num_points, coords, IN_ORDER); + file_dataspace = H5Dget_space(dataset6); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); start[0] = 0; start[1] = 0; - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - mem_dataspace = H5Dget_space (dataset6); + point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER); + mem_dataspace = H5Dget_space(dataset6); VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); VRFY((ret >= 0), "H5Sselect_elements succeeded"); /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), ""); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } /* read data collectively */ - ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); + ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1); VRFY((ret >= 0), "H5Dread dataset6 succeeded"); ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; + if (ret) + nerrors++; /* release all temporary handles. 
-    if(data_array1) free(data_array1);
-    data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+    if (data_array1)
+        free(data_array1);
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_array1 != NULL), "data_array1 malloc succeeded");

    /* Dataset7: point selection in memory - All selection in file*/
    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset7);
+    file_dataspace = H5Dget_space(dataset7);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_all(file_dataspace);
    VRFY((ret >= 0), "H5Sselect_all succeeded");

-    num_points = dim0 * dim1;
-    k=0;
-    for (i=0 ; i<dim0; i++) {
-        for (j=0 ; j<dim1; j++) {
-            coords[k++] = i;
-            coords[k++] = j;
+    num_points = (size_t)(dim0 * dim1);
+    k          = 0;
+    for (i = 0; i < dim0; i++) {
+        for (j = 0; j < dim1; j++) {
+            coords[k++] = (hsize_t)i;
+            coords[k++] = (hsize_t)j;
        }
    }
-    mem_dataspace = H5Dget_space (dataset7);
+    mem_dataspace = H5Dget_space(dataset7);
    VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
    VRFY((ret >= 0), "H5Sselect_elements succeeded");

    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((xfer_plist >= 0), "");
    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }

    /* read data collectively */
-    ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                  xfer_plist, data_array1);
+    ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
    VRFY((ret >= 0), "H5Dread dataset7 succeeded");

-    start[0] = dim0/mpi_size * mpi_rank;
+    start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
    start[1] = 0;
-    ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
-    if(ret) nerrors++;
+    ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+                       data_origin1);
+    if (ret)
+        nerrors++;

    /* release all temporary handles.
     */
    H5Sclose(file_dataspace);
@@ -1468,12 +1444,14 @@ dataset_readAll(void)
    H5Fclose(fid);

    /* release data buffers */
-    if(coords) HDfree(coords);
-    if(data_array1) HDfree(data_array1);
-    if(data_origin1) HDfree(data_origin1);
+    if (coords)
+        HDfree(coords);
+    if (data_array1)
+        HDfree(data_array1);
+    if (data_origin1)
+        HDfree(data_origin1);
}

-
/*
 * Part 2--Independent read/write for extendible datasets.
 */
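Part 2 revolves around datasets created with unlimited maximum dimensions, which HDF5 only allows with chunked storage. A minimal creation sketch — a hypothetical helper under those assumptions, with the 2x2 chunk size chosen arbitrarily:

#include "hdf5.h"

/* Create a 2-D dataset that starts empty and can grow without bound.
 * H5S_UNLIMITED max dims require a chunked creation property list. */
static hid_t
create_extendible(hid_t fid, const char *name)
{
    hsize_t dims[2]     = {0, 0};
    hsize_t max_dims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
    hsize_t chunks[2]   = {2, 2};
    hid_t   sid  = H5Screate_simple(2, dims, max_dims);
    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hid_t   dset = -1;

    if (sid >= 0 && dcpl >= 0 && H5Pset_chunk(dcpl, 2, chunks) >= 0)
        dset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Pclose(dcpl);
    H5Sclose(sid);
    return dset;
}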
@@ -1489,45 +1467,44 @@ dataset_readAll(void)
 void
 extend_writeInd(void)
 {
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t acc_tpl;              /* File access templates */
-    hid_t sid;                  /* Dataspace ID */
-    hid_t file_dataspace;       /* File dataspace ID */
-    hid_t mem_dataspace;        /* memory dataspace ID */
-    hid_t dataset1, dataset2;   /* Dataset ID */
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       sid;                /* Dataspace ID */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
    const char *filename;
-    hsize_t dims[RANK];   /* dataset dim sizes */
-    hsize_t max_dims[RANK] =
-        {H5S_UNLIMITED, H5S_UNLIMITED};  /* dataset maximum dim sizes */
-    DATATYPE *data_array1 = NULL;    /* data buffer */
-    hsize_t chunk_dims[RANK];    /* chunk sizes */
-    hid_t dataset_pl;      /* dataset create prop. list */
+    hsize_t     dims[RANK];                                      /* dataset dim sizes */
+    hsize_t     max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+    DATATYPE   *data_array1 = NULL;                              /* data buffer */
+    hsize_t     chunk_dims[RANK];                                /* chunk sizes */
+    hid_t       dataset_pl;                                      /* dataset create prop. list */

-    hsize_t start[RANK];      /* for hyperslab setting */
-    hsize_t count[RANK];      /* for hyperslab setting */
-    hsize_t stride[RANK];     /* for hyperslab setting */
-    hsize_t block[RANK];      /* for hyperslab setting */
+    hsize_t start[RANK];  /* for hyperslab setting */
+    hsize_t count[RANK];  /* for hyperslab setting */
+    hsize_t stride[RANK]; /* for hyperslab setting */
+    hsize_t block[RANK];  /* for hyperslab setting */

-    herr_t ret;               /* Generic return value */
-    int mpi_size, mpi_rank;
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Extend independent write test on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Extend independent write test on file %s\n", filename);

    /* set up MPI parameters */
-    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
-    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* setup chunk-size. Make sure sizes are > 0 */
-    chunk_dims[0] = chunkdim0;
-    chunk_dims[1] = chunkdim1;
+    chunk_dims[0] = (hsize_t)chunkdim0;
+    chunk_dims[1] = (hsize_t)chunkdim1;

    /* allocate memory for data buffer */
-    data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");

    /* -------------------
@@ -1537,22 +1514,22 @@ extend_writeInd(void)
    acc_tpl = create_faccess_plist(comm, info, facc_type);
    VRFY((acc_tpl >= 0), "");

-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset.  This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
-    int mdc_nelmts;
-    size_t rdcc_nelmts;
-    size_t rdcc_nbytes;
-    double rdcc_w0;
-
-    ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
-    VRFY((ret >= 0), "H5Pget_cache succeeded");
-    mdc_nelmts=4;
-    ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
-    VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+    /* Reduce the number of metadata cache slots, so that there are cache
+     * collisions during the raw data I/O on the chunked dataset. This stresses
+     * the metadata cache and tests for cache bugs. -QAK
+     */
+    {
+        int    mdc_nelmts;
+        size_t rdcc_nelmts;
+        size_t rdcc_nbytes;
+        double rdcc_w0;
+
+        ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+        VRFY((ret >= 0), "H5Pget_cache succeeded");
+        mdc_nelmts = 4;
+        ret        = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+        VRFY((ret >= 0), "H5Pset_cache succeeded");
+    }
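The cache-shrinking block kept (and only re-indented) by this hunk is a deliberate stressor: a tiny metadata cache forces evictions during chunked raw-data I/O. The safe idiom is read-modify-write, since H5Pset_cache replaces all four values at once. A sketch, assuming fapl is any file-access property list:

#include "hdf5.h"

/* Shrink only the metadata cache on a file-access plist, preserving
 * the raw-data chunk cache settings it already carries. */
static herr_t
shrink_mdc(hid_t fapl)
{
    int    mdc_nelmts;
    size_t rdcc_nelmts, rdcc_nbytes;
    double rdcc_w0;

    if (H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
        return -1;
    mdc_nelmts = 4; /* tiny cache -> frequent collisions */
    return H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
}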
    /* create the file collectively */
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -1562,14 +1539,13 @@ extend_writeInd(void)
    ret = H5Pclose(acc_tpl);
    VRFY((ret >= 0), "");

-
    /* --------------------------------------------------------------
     * Define the dimensions of the overall datasets and create them.
     * ------------------------------------------------------------- */

    /* set up dataset storage chunk sizes and creation property list */
-    if(VERBOSE_MED)
-        printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+    if (VERBOSE_MED)
+        HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
    dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -1578,7 +1554,7 @@ extend_writeInd(void)
    /* setup dimensionality object */
    /* start out with no rows, extend it later. */
    dims[0] = dims[1] = 0;
-    sid = H5Screate_simple (RANK, dims, max_dims);
+    sid     = H5Screate_simple(RANK, dims, max_dims);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    /* create an extendible dataset collectively */
@@ -1593,8 +1569,6 @@ extend_writeInd(void)
    H5Sclose(sid);
    H5Pclose(dataset_pl);

-
-
    /* -------------------------
     * Test writing to dataset1
     * -------------------------*/
@@ -1604,37 +1578,35 @@ extend_writeInd(void)
    /* put some trivial data in the data_array */
    dataset_fill(start, block, data_array1);
    MESG("data_array initialized");
-    if(VERBOSE_MED) {
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* Extend its current dim sizes before writing */
-    dims[0] = dim0;
-    dims[1] = dim1;
-    ret = H5Dset_extent(dataset1, dims);
+    dims[0] = (hsize_t)dim0;
+    dims[1] = (hsize_t)dim1;
+    ret     = H5Dset_extent(dataset1, dims);
    VRFY((ret >= 0), "H5Dset_extent succeeded");

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* write data independently */
-    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                   H5P_DEFAULT, data_array1);
+    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    /* release resource */
    H5Sclose(file_dataspace);
    H5Sclose(mem_dataspace);
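The write above only succeeds because the dataset was extended first and the file dataspace was fetched afterwards; a dataspace handle obtained before H5Dset_extent still describes the old extent. A condensed sketch of that ordering (hypothetical helper, assuming a 2-D chunked dataset that starts empty):

#include "hdf5.h"

/* Grow a 2-D chunked dataset to rows x cols, then write one buffer
 * covering the new extent. H5Dget_space must come *after*
 * H5Dset_extent so the selection sees the enlarged shape. */
static herr_t
extend_and_write(hid_t dset, hsize_t rows, hsize_t cols, const int *buf)
{
    hsize_t dims[2] = {rows, cols};
    hid_t   fspace, mspace;
    herr_t  ret;

    if (H5Dset_extent(dset, dims) < 0)
        return -1;
    fspace = H5Dget_space(dset); /* reflects the new dims */
    mspace = H5Screate_simple(2, dims, NULL);
    ret    = H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);
    H5Sclose(mspace);
    H5Sclose(fspace);
    return ret;
}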
MESG("data_array created"); + dataset_print(start, block, data_array1); } /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* Try write to dataset2 beyond its current dim sizes. Should fail. */ - /* Temporary turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); + file_dataspace = H5Dget_space(dataset2); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* write data independently. Should fail. */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + H5E_BEGIN_TRY + { + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); + } + H5E_END_TRY VRFY((ret < 0), "H5Dwrite failed as expected"); - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); H5Sclose(file_dataspace); /* Extend dataset2 and try again. Should succeed. */ - dims[0] = dim0; - dims[1] = dim1; - ret = H5Dset_extent(dataset2, dims); + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + ret = H5Dset_extent(dataset2, dims); VRFY((ret >= 0), "H5Dset_extent succeeded"); /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); + file_dataspace = H5Dget_space(dataset2); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1); VRFY((ret >= 0), "H5Dwrite succeeded"); /* release resource */ @@ -1696,7 +1665,6 @@ extend_writeInd(void) ret = H5Sclose(mem_dataspace); VRFY((ret >= 0), "H5Sclose succeeded"); - /* close dataset collectively */ ret = H5Dclose(dataset1); VRFY((ret >= 0), "H5Dclose1 succeeded"); @@ -1707,7 +1675,8 @@ extend_writeInd(void) H5Fclose(fid); /* release data buffers */ - if(data_array1) HDfree(data_array1); + if (data_array1) + HDfree(data_array1); } /* @@ -1720,30 +1689,30 @@ void extend_writeInd2(void) { const char *filename; - hid_t fid; /* HDF5 file ID */ - hid_t fapl; /* File access templates */ - hid_t fs; /* File dataspace ID */ - hid_t ms; /* Memory dataspace ID */ - hid_t dataset; /* Dataset ID */ - hsize_t orig_size=10; /* Original dataset dim size */ - hsize_t new_size=20; /* Extended dataset dim size */ - hsize_t one=1; - hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ - hsize_t chunk_size = 16384; /* chunk size */ - hid_t dcpl; /* dataset create prop. 
/*
@@ -1720,30 +1689,30 @@ void
 extend_writeInd2(void)
 {
    const char *filename;
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t fapl;                 /* File access templates */
-    hid_t fs;                   /* File dataspace ID */
-    hid_t ms;                   /* Memory dataspace ID */
-    hid_t dataset;              /* Dataset ID */
-    hsize_t orig_size=10;       /* Original dataset dim size */
-    hsize_t new_size=20;        /* Extended dataset dim size */
-    hsize_t one=1;
-    hsize_t max_size = H5S_UNLIMITED;   /* dataset maximum dim size */
-    hsize_t chunk_size = 16384; /* chunk size */
-    hid_t dcpl;                 /* dataset create prop. list */
-    int written[10],            /* Data to write */
-        retrieved[10];          /* Data read in */
-    int mpi_size, mpi_rank;     /* MPI settings */
-    int i;                      /* Local index variable */
-    herr_t ret;                 /* Generic return value */
+    hid_t       fid;                  /* HDF5 file ID */
+    hid_t       fapl;                 /* File access templates */
+    hid_t       fs;                   /* File dataspace ID */
+    hid_t       ms;                   /* Memory dataspace ID */
+    hid_t       dataset;              /* Dataset ID */
+    hsize_t     orig_size = 10;       /* Original dataset dim size */
+    hsize_t     new_size  = 20;       /* Extended dataset dim size */
+    hsize_t     one       = 1;
+    hsize_t     max_size   = H5S_UNLIMITED; /* dataset maximum dim size */
+    hsize_t     chunk_size = 16384;         /* chunk size */
+    hid_t       dcpl;                       /* dataset create prop. list */
+    int         written[10],                /* Data to write */
+        retrieved[10];                      /* Data read in */
+    int    mpi_size, mpi_rank;              /* MPI settings */
+    int    i;                               /* Local index variable */
+    herr_t ret;                             /* Generic return value */

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Extend independent write test #2 on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Extend independent write test #2 on file %s\n", filename);

    /* set up MPI parameters */
-    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
-    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* -------------------
     * START AN HDF5 FILE
@@ -1760,7 +1729,6 @@ extend_writeInd2(void)
    ret = H5Pclose(fapl);
    VRFY((ret >= 0), "H5Pclose succeeded");

-
    /* --------------------------------------------------------------
     * Define the dimensions of the overall datasets and create them.
     * ------------------------------------------------------------- */
@@ -1772,7 +1740,7 @@ extend_writeInd2(void)
    VRFY((ret >= 0), "H5Pset_chunk succeeded");

    /* setup dimensionality object */
-    fs = H5Screate_simple (1, &orig_size, &max_size);
+    fs = H5Screate_simple(1, &orig_size, &max_size);
    VRFY((fs >= 0), "H5Screate_simple succeeded");

    /* create an extendible dataset collectively */
@@ -1783,7 +1751,6 @@ extend_writeInd2(void)
    ret = H5Pclose(dcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");

-
    /* -------------------------
     * Test writing to dataset
     * -------------------------*/
@@ -1792,14 +1759,14 @@ extend_writeInd2(void)
    VRFY((ms >= 0), "H5Screate_simple succeeded");

    /* put some trivial data in the data_array */
-    for(i = 0; i < (int)orig_size; i++)
+    for (i = 0; i < (int)orig_size; i++)
        written[i] = i;
    MESG("data array initialized");
-    if(VERBOSE_MED) {
-        MESG("writing at offset zero: ");
-        for(i = 0; i < (int)orig_size; i++)
-            printf("%s%d", i?", ":"", written[i]);
-        printf("\n");
+    if (VERBOSE_MED) {
+        MESG("writing at offset zero: ");
+        for (i = 0; i < (int)orig_size; i++)
+            HDprintf("%s%d", i ? ", " : "", written[i]);
+        HDprintf("\n");
    }
", " : "", written[i]); + HDprintf("\n"); } ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); VRFY((ret >= 0), "H5Dwrite succeeded"); @@ -1809,17 +1776,17 @@ extend_writeInd2(void) * -------------------------*/ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); VRFY((ret >= 0), "H5Dread succeeded"); - for (i=0; i<(int)orig_size; i++) - if(written[i]!=retrieved[i]) { - printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__, - i,written[i], i,retrieved[i]); + for (i = 0; i < (int)orig_size; i++) + if (written[i] != retrieved[i]) { + HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, + written[i], i, retrieved[i]); nerrors++; } - if(VERBOSE_MED){ - MESG("read at offset zero: "); - for (i=0; i<(int)orig_size; i++) - printf("%s%d", i?", ":"", retrieved[i]); - printf("\n"); + if (VERBOSE_MED) { + MESG("read at offset zero: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? ", " : "", retrieved[i]); + HDprintf("\n"); } /* ------------------------- @@ -1835,14 +1802,15 @@ extend_writeInd2(void) /* ------------------------- * Write to the second half of the dataset * -------------------------*/ - for (i=0; i<(int)orig_size; i++) - written[i] = orig_size + i; + H5_CHECK_OVERFLOW(orig_size, hsize_t, int); + for (i = 0; i < (int)orig_size; i++) + written[i] = (int)orig_size + i; MESG("data array re-initialized"); - if(VERBOSE_MED) { - MESG("writing at offset 10: "); - for (i=0; i<(int)orig_size; i++) - printf("%s%d", i?", ":"", written[i]); - printf("\n"); + if (VERBOSE_MED) { + MESG("writing at offset 10: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? ", " : "", written[i]); + HDprintf("\n"); } ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size); VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); @@ -1854,20 +1822,19 @@ extend_writeInd2(void) * -------------------------*/ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); VRFY((ret >= 0), "H5Dread succeeded"); - for (i=0; i<(int)orig_size; i++) - if(written[i]!=retrieved[i]) { - printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__, - i,written[i], i,retrieved[i]); + for (i = 0; i < (int)orig_size; i++) + if (written[i] != retrieved[i]) { + HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i, + written[i], i, retrieved[i]); nerrors++; } - if(VERBOSE_MED){ - MESG("read at offset 10: "); - for (i=0; i<(int)orig_size; i++) - printf("%s%d", i?", ":"", retrieved[i]); - printf("\n"); + if (VERBOSE_MED) { + MESG("read at offset 10: "); + for (i = 0; i < (int)orig_size; i++) + HDprintf("%s%d", i ? 
", " : "", retrieved[i]); + HDprintf("\n"); } - /* Close dataset collectively */ ret = H5Dclose(dataset); VRFY((ret >= 0), "H5Dclose succeeded"); @@ -1881,41 +1848,41 @@ extend_writeInd2(void) void extend_readInd(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* memory dataspace ID */ + hid_t dataset1, dataset2; /* Dataset ID */ + hsize_t dims[RANK]; /* dataset dim sizes */ + DATATYPE *data_array1 = NULL; /* data buffer */ + DATATYPE *data_array2 = NULL; /* data buffer */ + DATATYPE *data_origin1 = NULL; /* expected data buffer */ const char *filename; - hsize_t start[RANK]; /* for hyperslab setting */ - hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ - hsize_t block[RANK]; /* for hyperslab setting */ + hsize_t start[RANK]; /* for hyperslab setting */ + hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */ + hsize_t block[RANK]; /* for hyperslab setting */ - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; MPI_Comm comm = MPI_COMM_WORLD; MPI_Info info = MPI_INFO_NULL; filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Extend independent read test on file %s\n", filename); + if (VERBOSE_MED) + HDprintf("Extend independent read test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); + data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE)); VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); /* ------------------- @@ -1926,7 +1893,7 @@ extend_readInd(void) VRFY((acc_tpl >= 0), ""); /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); + fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); VRFY((fid >= 0), ""); /* Release file-access template */ @@ -1942,88 +1909,85 @@ extend_readInd(void) VRFY((dataset2 >= 0), ""); /* Try extend dataset1 which is open RDONLY. Should fail. 
-    /* first turn off auto error reporting */
-    H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
-    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
    VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
    dims[0]++;
-    ret = H5Dset_extent(dataset1, dims);
+    H5E_BEGIN_TRY
+    {
+        ret = H5Dset_extent(dataset1, dims);
+    }
+    H5E_END_TRY
    VRFY((ret < 0), "H5Dset_extent failed as expected");

-    /* restore auto error reporting */
-    H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
    H5Sclose(file_dataspace);

-
    /* Read dataset1 using BYROW pattern */
    /* set up dimensions of the slab this process accesses */
    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "");

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* fill dataset with test data */
    dataset_fill(start, block, data_origin1);
-    if(VERBOSE_MED){
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* read data independently */
-    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                  H5P_DEFAULT, data_array1);
+    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
    VRFY((ret == 0), "dataset1 read verified correct");
-    if(ret) nerrors++;
+    if (ret)
+        nerrors++;

    H5Sclose(mem_dataspace);
    H5Sclose(file_dataspace);

-
    /* Read dataset2 using BYCOL pattern */
    /* set up dimensions of the slab this process accesses */
    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset2);
+    file_dataspace = H5Dget_space(dataset2);
    VRFY((file_dataspace >= 0), "");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "");

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* fill dataset with test data */
    dataset_fill(start, block, data_origin1);
-    if(VERBOSE_MED){
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* read data independently */
-    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                  H5P_DEFAULT, data_array1);
+    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
    VRFY((ret == 0), "dataset2 read verified correct");
-    if(ret) nerrors++;
+    if (ret)
+        nerrors++;
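slab_set() is the harness's own helper; the BYROW case used above amounts to giving each rank one contiguous band of full rows. A stand-alone sketch that mirrors what the tests appear to compute — a hypothetical re-derivation, assuming dim0 divides evenly by mpi_size:

#include "hdf5.h"

/* Partition a dim0 x dim1 dataset into one horizontal band per rank:
 * a single block of (dim0/mpi_size) full rows at the rank's offset.
 * A BYCOL variant would transpose the roles of the two dimensions. */
static void
slab_byrow(int mpi_rank, int mpi_size, hsize_t dim0, hsize_t dim1,
           hsize_t start[2], hsize_t count[2], hsize_t stride[2], hsize_t block[2])
{
    block[0]  = dim0 / (hsize_t)mpi_size;
    block[1]  = dim1;
    stride[0] = block[0];
    stride[1] = block[1];
    count[0]  = 1;
    count[1]  = 1;
    start[0]  = (hsize_t)mpi_rank * block[0];
    start[1]  = 0;
}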
    H5Sclose(mem_dataspace);
    H5Sclose(file_dataspace);

@@ -2034,14 +1998,16 @@ extend_readInd(void)
    ret = H5Dclose(dataset2);
    VRFY((ret >= 0), "");

-
    /* close the file collectively */
    H5Fclose(fid);

    /* release data buffers */
-    if(data_array1) HDfree(data_array1);
-    if(data_array2) HDfree(data_array2);
-    if(data_origin1) HDfree(data_origin1);
+    if (data_array1)
+        HDfree(data_array1);
+    if (data_array2)
+        HDfree(data_array2);
+    if (data_origin1)
+        HDfree(data_origin1);
}

/*
@@ -2059,46 +2025,45 @@ extend_readInd(void)
 void
 extend_writeAll(void)
 {
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t acc_tpl;              /* File access templates */
-    hid_t xfer_plist;           /* Dataset transfer properties list */
-    hid_t sid;                  /* Dataspace ID */
-    hid_t file_dataspace;       /* File dataspace ID */
-    hid_t mem_dataspace;        /* memory dataspace ID */
-    hid_t dataset1, dataset2;   /* Dataset ID */
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       xfer_plist;         /* Dataset transfer properties list */
+    hid_t       sid;                /* Dataspace ID */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
    const char *filename;
-    hsize_t dims[RANK];   /* dataset dim sizes */
-    hsize_t max_dims[RANK] =
-        {H5S_UNLIMITED, H5S_UNLIMITED};  /* dataset maximum dim sizes */
-    DATATYPE *data_array1 = NULL;    /* data buffer */
-    hsize_t chunk_dims[RANK];    /* chunk sizes */
-    hid_t dataset_pl;      /* dataset create prop. list */
+    hsize_t     dims[RANK];                                      /* dataset dim sizes */
+    hsize_t     max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+    DATATYPE   *data_array1 = NULL;                              /* data buffer */
+    hsize_t     chunk_dims[RANK];                                /* chunk sizes */
+    hid_t       dataset_pl;                                      /* dataset create prop. list */

-    hsize_t start[RANK];      /* for hyperslab setting */
-    hsize_t count[RANK];      /* for hyperslab setting */
-    hsize_t stride[RANK];     /* for hyperslab setting */
-    hsize_t block[RANK];      /* for hyperslab setting */
+    hsize_t start[RANK];  /* for hyperslab setting */
+    hsize_t count[RANK];  /* for hyperslab setting */
+    hsize_t stride[RANK]; /* for hyperslab setting */
+    hsize_t block[RANK];  /* for hyperslab setting */

-    herr_t ret;               /* Generic return value */
-    int mpi_size, mpi_rank;
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Extend independent write test on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Extend independent write test on file %s\n", filename);

    /* set up MPI parameters */
-    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
-    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* setup chunk-size. Make sure sizes are > 0 */
-    chunk_dims[0] = chunkdim0;
-    chunk_dims[1] = chunkdim1;
+    chunk_dims[0] = (hsize_t)chunkdim0;
+    chunk_dims[1] = (hsize_t)chunkdim1;

    /* allocate memory for data buffer */
-    data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");

    /* -------------------
@@ -2108,22 +2073,22 @@ extend_writeAll(void)
    acc_tpl = create_faccess_plist(comm, info, facc_type);
    VRFY((acc_tpl >= 0), "");

-/* Reduce the number of metadata cache slots, so that there are cache
- * collisions during the raw data I/O on the chunked dataset.  This stresses
- * the metadata cache and tests for cache bugs. -QAK
- */
-{
-    int mdc_nelmts;
-    size_t rdcc_nelmts;
-    size_t rdcc_nbytes;
-    double rdcc_w0;
-
-    ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
-    VRFY((ret >= 0), "H5Pget_cache succeeded");
-    mdc_nelmts=4;
-    ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
-    VRFY((ret >= 0), "H5Pset_cache succeeded");
-}
+    /* Reduce the number of metadata cache slots, so that there are cache
+     * collisions during the raw data I/O on the chunked dataset. This stresses
+     * the metadata cache and tests for cache bugs. -QAK
+     */
+    {
+        int    mdc_nelmts;
+        size_t rdcc_nelmts;
+        size_t rdcc_nbytes;
+        double rdcc_w0;
+
+        ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+        VRFY((ret >= 0), "H5Pget_cache succeeded");
+        mdc_nelmts = 4;
+        ret        = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+        VRFY((ret >= 0), "H5Pset_cache succeeded");
+    }

    /* create the file collectively */
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
@@ -2133,14 +2098,13 @@ extend_writeAll(void)
    ret = H5Pclose(acc_tpl);
    VRFY((ret >= 0), "");

-
    /* --------------------------------------------------------------
     * Define the dimensions of the overall datasets and create them.
     * ------------------------------------------------------------- */

    /* set up dataset storage chunk sizes and creation property list */
-    if(VERBOSE_MED)
-        printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+    if (VERBOSE_MED)
+        HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
    dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2149,7 +2113,7 @@ extend_writeAll(void)
    /* setup dimensionality object */
    /* start out with no rows, extend it later. */
    dims[0] = dims[1] = 0;
-    sid = H5Screate_simple (RANK, dims, max_dims);
+    sid     = H5Screate_simple(RANK, dims, max_dims);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    /* create an extendible dataset collectively */
@@ -2164,8 +2128,6 @@ extend_writeAll(void)
    H5Sclose(sid);
    H5Pclose(dataset_pl);

-
-
    /* -------------------------
     * Test writing to dataset1
     * -------------------------*/
@@ -2175,41 +2137,39 @@ extend_writeAll(void)
    /* put some trivial data in the data_array */
    dataset_fill(start, block, data_array1);
    MESG("data_array initialized");
-    if(VERBOSE_MED) {
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* Extend its current dim sizes before writing */
-    dims[0] = dim0;
-    dims[1] = dim1;
-    ret = H5Dset_extent(dataset1, dims);
+    dims[0] = (hsize_t)dim0;
+    dims[1] = (hsize_t)dim1;
+    ret     = H5Dset_extent(dataset1, dims);
    VRFY((ret >= 0), "H5Dset_extent succeeded");

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }

-
    /* write data collectively */
-    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                   xfer_plist, data_array1);
+    ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    /* release resource */
@@ -2217,7 +2177,6 @@ extend_writeAll(void)
    H5Sclose(mem_dataspace);
    H5Pclose(xfer_plist);

-
    /* -------------------------
     * Test writing to dataset2
     * -------------------------*/
@@ -2227,61 +2186,57 @@ extend_writeAll(void)
    /* put some trivial data in the data_array */
    dataset_fill(start, block, data_array1);
    MESG("data_array initialized");
-    if(VERBOSE_MED){
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }

-
    /* Try write to dataset2 beyond its current dim sizes.  Should fail. */
-    /* Temporary turn off auto error reporting */
-    H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
-    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset2);
+    file_dataspace = H5Dget_space(dataset2);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* write data independently.  Should fail. */
-    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                   xfer_plist, data_array1);
+    H5E_BEGIN_TRY
+    {
+        ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+    }
+    H5E_END_TRY
    VRFY((ret < 0), "H5Dwrite failed as expected");

-    /* restore auto error reporting */
-    H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
    H5Sclose(file_dataspace);

    /* Extend dataset2 and try again.  Should succeed. */
-    dims[0] = dim0;
-    dims[1] = dim1;
-    ret = H5Dset_extent(dataset2, dims);
+    dims[0] = (hsize_t)dim0;
+    dims[1] = (hsize_t)dim1;
+    ret     = H5Dset_extent(dataset2, dims);
    VRFY((ret >= 0), "H5Dset_extent succeeded");

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset2);
+    file_dataspace = H5Dget_space(dataset2);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sset_hyperslab succeeded");

    /* write data independently */
-    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                   xfer_plist, data_array1);
+    ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
    VRFY((ret >= 0), "H5Dwrite succeeded");

    /* release resource */
@@ -2292,7 +2247,6 @@ extend_writeAll(void)
    ret = H5Pclose(xfer_plist);
    VRFY((ret >= 0), "H5Pclose succeeded");

-
    /* close dataset collectively */
    ret = H5Dclose(dataset1);
    VRFY((ret >= 0), "H5Dclose1 succeeded");
@@ -2303,49 +2257,50 @@ extend_writeAll(void)
    H5Fclose(fid);

    /* release data buffers */
-    if(data_array1) HDfree(data_array1);
+    if (data_array1)
+        HDfree(data_array1);
}
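extend_writeAll repeats the previous test with every transfer routed through a collective property list. End to end, the parallel flow is: MPIO file-access plist, collective create, per-rank hyperslab selection, collective H5Dwrite. A compressed sketch under those assumptions (a hypothetical helper; error checks elided for space, and it assumes rows divides evenly by nranks):

#include "hdf5.h"
#include <mpi.h>

/* Every rank writes its own band of rows with one collective call.
 * All ranks must execute each H5F/H5D call here together. */
static void
write_band(const char *fname, int rank, int nranks, hsize_t rows, hsize_t cols, const int *band)
{
    hsize_t dims[2]  = {rows, cols};
    hsize_t block[2] = {rows / (hsize_t)nranks, cols};
    hsize_t start[2] = {(hsize_t)rank * block[0], 0};
    hid_t   fapl     = H5Pcreate(H5P_FILE_ACCESS);
    hid_t   dxpl     = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    hid_t fid    = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    hid_t fspace = H5Screate_simple(2, dims, NULL);
    hid_t dset   = H5Dcreate2(fid, "d", H5T_NATIVE_INT, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    hid_t mspace = H5Screate_simple(2, block, NULL);

    /* one block of full rows per rank, then a single collective write */
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, block, NULL);
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, band);

    H5Sclose(mspace);
    H5Dclose(dset);
    H5Sclose(fspace);
    H5Fclose(fid);
    H5Pclose(dxpl);
    H5Pclose(fapl);
}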
/* Example of using the parallel HDF5 library to read an extendible dataset */
void
extend_readAll(void)
{
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t acc_tpl;              /* File access templates */
-    hid_t xfer_plist;           /* Dataset transfer properties list */
-    hid_t file_dataspace;       /* File dataspace ID */
-    hid_t mem_dataspace;        /* memory dataspace ID */
-    hid_t dataset1, dataset2;   /* Dataset ID */
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       xfer_plist;         /* Dataset transfer properties list */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
    const char *filename;
-    hsize_t dims[RANK];   /* dataset dim sizes */
-    DATATYPE *data_array1 = NULL;    /* data buffer */
-    DATATYPE *data_array2 = NULL;    /* data buffer */
-    DATATYPE *data_origin1 = NULL;   /* expected data buffer */
+    hsize_t     dims[RANK];          /* dataset dim sizes */
+    DATATYPE   *data_array1  = NULL; /* data buffer */
+    DATATYPE   *data_array2  = NULL; /* data buffer */
+    DATATYPE   *data_origin1 = NULL; /* expected data buffer */

-    hsize_t start[RANK];                 /* for hyperslab setting */
-    hsize_t count[RANK], stride[RANK];   /* for hyperslab setting */
-    hsize_t block[RANK];                 /* for hyperslab setting */
+    hsize_t start[RANK];               /* for hyperslab setting */
+    hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+    hsize_t block[RANK];               /* for hyperslab setting */

-    herr_t ret;                 /* Generic return value */
-    int mpi_size, mpi_rank;
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Extend independent read test on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Extend independent read test on file %s\n", filename);

    /* set up MPI parameters */
-    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
-    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* allocate memory for data buffer */
-    data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+    data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
-    data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+    data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
-    data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+    data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
    VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");

    /* -------------------
@@ -2356,7 +2311,7 @@ extend_readAll(void)
    VRFY((acc_tpl >= 0), "");

    /* open the file collectively */
-    fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+    fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
    VRFY((fid >= 0), "");

    /* Release file-access template */
@@ -2372,111 +2327,106 @@ extend_readAll(void)
    VRFY((dataset2 >= 0), "");

    /* Try extend dataset1 which is open RDONLY.  Should fail. */
-    /* first turn off auto error reporting */
-    H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
-    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
    ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
    VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
    dims[0]++;
-    ret = H5Dset_extent(dataset1, dims);
+    H5E_BEGIN_TRY
+    {
+        ret = H5Dset_extent(dataset1, dims);
+    }
+    H5E_END_TRY
    VRFY((ret < 0), "H5Dset_extent failed as expected");

-    /* restore auto error reporting */
-    H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
    H5Sclose(file_dataspace);

-
    /* Read dataset1 using BYROW pattern */
    /* set up dimensions of the slab this process accesses */
    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset1);
+    file_dataspace = H5Dget_space(dataset1);
    VRFY((file_dataspace >= 0), "");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "");

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* fill dataset with test data */
    dataset_fill(start, block, data_origin1);
-    if(VERBOSE_MED){
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }

-
    /* read data collectively */
-    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                  xfer_plist, data_array1);
+    ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
    VRFY((ret == 0), "dataset1 read verified correct");
-    if(ret) nerrors++;
+    if (ret)
+        nerrors++;

    H5Sclose(mem_dataspace);
    H5Sclose(file_dataspace);
    H5Pclose(xfer_plist);

-
    /* Read dataset2 using BYCOL pattern */
    /* set up dimensions of the slab this process accesses */
    slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);

    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset2);
+    file_dataspace = H5Dget_space(dataset2);
    VRFY((file_dataspace >= 0), "");
    ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "");

    /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "");

    /* fill dataset with test data */
    dataset_fill(start, block, data_origin1);
-    if(VERBOSE_MED){
-        MESG("data_array created");
-        dataset_print(start, block, data_array1);
+    if (VERBOSE_MED) {
+        MESG("data_array created");
+        dataset_print(start, block, data_array1);
    }

    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
+    if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }

-
    /* read data collectively */
-    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-                  xfer_plist, data_array1);
+    ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
    VRFY((ret >= 0), "H5Dread succeeded");

    /* verify the read data with original expected data */
    ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
    VRFY((ret == 0), "dataset2 read verified correct");
-    if(ret) nerrors++;
+    if (ret)
+        nerrors++;

    H5Sclose(mem_dataspace);
    H5Sclose(file_dataspace);
@@ -2488,14 +2438,16 @@ extend_readAll(void)
    ret = H5Dclose(dataset2);
    VRFY((ret >= 0), "");

-
    /* close the file collectively */
    H5Fclose(fid);

    /* release data buffers */
-    if(data_array1) HDfree(data_array1);
-    if(data_array2) HDfree(data_array2);
-    if(data_origin1) HDfree(data_origin1);
+    if (data_array1)
+        HDfree(data_array1);
+    if (data_array2)
+        HDfree(data_array2);
+    if (data_origin1)
+        HDfree(data_origin1);
}

/*
@@ -2506,155 +2458,173 @@ extend_readAll(void)
 void
 compress_readAll(void)
 {
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t acc_tpl;              /* File access templates */
-    hid_t dcpl;                 /* Dataset creation property list */
-    hid_t xfer_plist;           /* Dataset transfer properties list */
-    hid_t dataspace;            /* Dataspace ID */
-    hid_t dataset;              /* Dataset ID */
-    int rank=1;                 /* Dataspace rank */
-    hsize_t dim=dim0;           /* Dataspace dimensions */
-    unsigned u;                 /* Local index variable */
-    DATATYPE *data_read = NULL; /* data buffer */
-    DATATYPE *data_orig = NULL; /* expected data buffer */
+    hid_t       fid;                           /* HDF5 file ID */
+    hid_t       acc_tpl;                       /* File access templates */
+    hid_t       dcpl;                          /* Dataset creation property list */
+    hid_t       xfer_plist;                    /* Dataset transfer properties list */
+    hid_t       dataspace;                     /* Dataspace ID */
+    hid_t       dataset;                       /* Dataset ID */
+    int         rank = 1;                      /* Dataspace rank */
+    hsize_t     dim  = (hsize_t)dim0;          /* Dataspace dimensions */
+    unsigned    u;                             /* Local index variable */
+    unsigned    chunk_opts;                    /* Chunk options */
+    unsigned    disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+    DATATYPE   *data_read = NULL;              /* data buffer */
+    DATATYPE   *data_orig = NULL;              /* expected data buffer */
    const char *filename;
-    MPI_Comm comm = MPI_COMM_WORLD;
-    MPI_Info info = MPI_INFO_NULL;
-    int mpi_size, mpi_rank;
-    herr_t ret;                 /* Generic return value */
+    MPI_Comm    comm = MPI_COMM_WORLD;
+    MPI_Info    info = MPI_INFO_NULL;
+    int         mpi_size, mpi_rank;
+    herr_t      ret; /* Generic return value */

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Collective chunked dataset read test on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Collective chunked dataset read test on file %s\n", filename);
%s\n", filename); /* Retrieve MPI parameters */ - MPI_Comm_size(comm,&mpi_size); - MPI_Comm_rank(comm,&mpi_rank); + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); /* Allocate data buffer */ - data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE)); + data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded"); - data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE)); + data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE)); VRFY((data_read != NULL), "data_array1 HDmalloc succeeded"); /* Initialize data buffers */ - for(u=0; u<dim;u++) - data_orig[u]=u; - - /* Process zero creates the file with a compressed, chunked dataset */ - if(mpi_rank==0) { - hsize_t chunk_dim; /* Chunk dimensions */ - - /* Create the file */ - fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY((fid > 0), "H5Fcreate succeeded"); - - /* Create property list for chunking and compression */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl > 0), "H5Pcreate succeeded"); - - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - VRFY((ret >= 0), "H5Pset_layout succeeded"); + for (u = 0; u < dim; u++) + data_orig[u] = (DATATYPE)u; - /* Use eight chunks */ - chunk_dim = dim / 8; - ret = H5Pset_chunk(dcpl, rank, &chunk_dim); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); + /* Run test both with and without filters disabled on partial chunks */ + for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Process zero creates the file with a compressed, chunked dataset */ + if (mpi_rank == 0) { + hsize_t chunk_dim; /* Chunk dimensions */ + + /* Create the file */ + fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid > 0), "H5Fcreate succeeded"); + + /* Create property list for chunking and compression */ + dcpl = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl > 0), "H5Pcreate succeeded"); + + ret = H5Pset_layout(dcpl, H5D_CHUNKED); + VRFY((ret >= 0), "H5Pset_layout succeeded"); + + /* Use eight chunks */ + chunk_dim = dim / 8; + ret = H5Pset_chunk(dcpl, rank, &chunk_dim); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + + /* Set chunk options appropriately */ + if (disable_partial_chunk_filters) { + ret = H5Pget_chunk_opts(dcpl, &chunk_opts); + VRFY((ret >= 0), "H5Pget_chunk_opts succeeded"); + + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; + + ret = H5Pset_chunk_opts(dcpl, chunk_opts); + VRFY((ret >= 0), "H5Pset_chunk_opts succeeded"); + } /* end if */ + + ret = H5Pset_deflate(dcpl, 9); + VRFY((ret >= 0), "H5Pset_deflate succeeded"); + + /* Create dataspace */ + dataspace = H5Screate_simple(rank, &dim, NULL); + VRFY((dataspace > 0), "H5Screate_simple succeeded"); + + /* Create dataset */ + dataset = + H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), "H5Dcreate2 succeeded"); + + /* Write compressed data */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig); + VRFY((ret >= 0), "H5Dwrite succeeded"); + + /* Close objects */ + ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Sclose(dataspace); + VRFY((ret >= 0), "H5Sclose succeeded"); + ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } - ret = H5Pset_deflate(dcpl, 9); - VRFY((ret >= 0), "H5Pset_deflate succeeded"); + /* Wait for file to 
-    /* Create dataspace */
-    dataspace = H5Screate_simple(rank, &dim, NULL);
-    VRFY((dataspace > 0), "H5Screate_simple succeeded");
+        /* -------------------
+         * OPEN AN HDF5 FILE
+         * -------------------*/

-    /* Create dataset */
-    dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
-    VRFY((dataset > 0), "H5Dcreate2 succeeded");
+        /* setup file access template */
+        acc_tpl = create_faccess_plist(comm, info, facc_type);
+        VRFY((acc_tpl >= 0), "");

-    /* Write compressed data */
-    ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
-    VRFY((ret >= 0), "H5Dwrite succeeded");
+        /* open the file collectively */
+        fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+        VRFY((fid > 0), "H5Fopen succeeded");

-    /* Close objects */
-    ret = H5Pclose(dcpl);
+        /* Release file-access template */
+        ret = H5Pclose(acc_tpl);
        VRFY((ret >= 0), "H5Pclose succeeded");
-    ret = H5Sclose(dataspace);
-    VRFY((ret >= 0), "H5Sclose succeeded");
-    ret = H5Dclose(dataset);
-    VRFY((ret >= 0), "H5Dclose succeeded");
-    ret = H5Fclose(fid);
-    VRFY((ret >= 0), "H5Fclose succeeded");
-    }
-
-    /* Wait for file to be created */
-    MPI_Barrier(comm);

-    /* -------------------
-     * OPEN AN HDF5 FILE
-     * -------------------*/
+        /* Open dataset with compressed chunks */
+        dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+        VRFY((dataset > 0), "H5Dopen2 succeeded");

-    /* setup file access template */
-    acc_tpl = create_faccess_plist(comm, info, facc_type);
-    VRFY((acc_tpl >= 0), "");
-
-    /* open the file collectively */
-    fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
-    VRFY((fid > 0), "H5Fopen succeeded");
-
-    /* Release file-access template */
-    ret = H5Pclose(acc_tpl);
-    VRFY((ret >= 0), "H5Pclose succeeded");
-
-
-    /* Open dataset with compressed chunks */
-    dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
-    VRFY((dataset > 0), "H5Dopen2 succeeded");
-
-    /* Try reading & writing data */
-    if(dataset>0) {
-        /* Create dataset transfer property list */
-        xfer_plist = H5Pcreate(H5P_DATASET_XFER);
-        VRFY((xfer_plist > 0), "H5Pcreate succeeded");
-
-        ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
-        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-        if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-            ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-            VRFY((ret>= 0),"set independent IO collectively succeeded");
-        }
+        /* Try reading & writing data */
+        if (dataset > 0) {
+            /* Create dataset transfer property list */
+            xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+            VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+            ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+            VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+            if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+                ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+                VRFY((ret >= 0), "set independent IO collectively succeeded");
+            }

-        /* Try reading the data */
-        ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
-        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+            /* Try reading the data */
+            ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+            VRFY((ret >= 0), "H5Dread succeeded");
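Reading back is symmetric: H5S_ALL on both sides plus a collective dxpl means every rank receives the full array. A sketch of that read path — a hypothetical helper, assuming fid was opened through an MPIO file-access plist:

#include "hdf5.h"

/* Collectively read an entire 1-D int dataset into buf. Every rank
 * gets a full copy; buf must hold the dataset's element count. */
static herr_t
read_all(hid_t fid, const char *name, int *buf)
{
    hid_t  dset = H5Dopen2(fid, name, H5P_DEFAULT);
    hid_t  dxpl = H5Pcreate(H5P_DATASET_XFER);
    herr_t ret  = -1;

    if (dset >= 0 && dxpl >= 0 && H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE) >= 0)
        ret = H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);

    H5Pclose(dxpl);
    H5Dclose(dset);
    return ret;
}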
-        /* Verify data read */
-        for(u=0; u<dim; u++)
-            if(data_orig[u]!=data_read[u]) {
-                printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
-                    (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
-                nerrors++;
-            }
+            /* Verify data read */
+            for (u = 0; u < dim; u++)
+                if (data_orig[u] != data_read[u]) {
+                    HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+                             (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
+                    nerrors++;
+                }

-        /* Writing to the compressed, chunked dataset in parallel should fail */
-        H5E_BEGIN_TRY {
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
            ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
-        } H5E_END_TRY;
-        VRFY((ret < 0), "H5Dwrite failed");
+            VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif

-        ret = H5Pclose(xfer_plist);
-        VRFY((ret >= 0), "H5Pclose succeeded");
-        ret = H5Dclose(dataset);
-        VRFY((ret >= 0), "H5Dclose succeeded");
-    } /* end if */
+            ret = H5Pclose(xfer_plist);
+            VRFY((ret >= 0), "H5Pclose succeeded");
+            ret = H5Dclose(dataset);
+            VRFY((ret >= 0), "H5Dclose succeeded");
+        } /* end if */

-    ret = H5Fclose(fid);
-    VRFY((ret >= 0), "H5Fclose succeeded");
+        /* Close file */
+        ret = H5Fclose(fid);
+        VRFY((ret >= 0), "H5Fclose succeeded");
+    } /* end for */

    /* release data buffers */
-    if(data_read) HDfree(data_read);
-    if(data_orig) HDfree(data_orig);
+    if (data_read)
+        HDfree(data_read);
+    if (data_orig)
+        HDfree(data_orig);
}
#endif /* H5_HAVE_FILTER_DEFLATE */
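Note how the hunk above flips the old expectation: when the library is built with parallel filtered-write support, writing to a compressed dataset collectively is now expected to succeed rather than fail. Code that must build against both old and new libraries can guard the write the same way. A sketch, assuming the H5_HAVE_PARALLEL_FILTERED_WRITES macro from the library's build configuration:

#include "hdf5.h"

/* Write through the filter pipeline only when the library was built
 * with parallel filtered-write support; otherwise skip the attempt. */
static herr_t
maybe_filtered_write(hid_t dset, hid_t dxpl, const int *buf)
{
#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
    return H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);
#else
    (void)dset; (void)dxpl; (void)buf; /* older builds: unsupported collectively */
    return 0;
#endif
}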
@@ -2673,43 +2643,43 @@ compress_readAll(void)
 void
 none_selection_chunk(void)
 {
-    hid_t fid;                  /* HDF5 file ID */
-    hid_t acc_tpl;              /* File access templates */
-    hid_t xfer_plist;           /* Dataset transfer properties list */
-    hid_t sid;                  /* Dataspace ID */
-    hid_t file_dataspace;       /* File dataspace ID */
-    hid_t mem_dataspace;        /* memory dataspace ID */
-    hid_t dataset1, dataset2;   /* Dataset ID */
+    hid_t       fid;                /* HDF5 file ID */
+    hid_t       acc_tpl;            /* File access templates */
+    hid_t       xfer_plist;         /* Dataset transfer properties list */
+    hid_t       sid;                /* Dataspace ID */
+    hid_t       file_dataspace;     /* File dataspace ID */
+    hid_t       mem_dataspace;      /* memory dataspace ID */
+    hid_t       dataset1, dataset2; /* Dataset ID */
    const char *filename;
-    hsize_t dims[RANK];            /* dataset dim sizes */
-    DATATYPE *data_origin = NULL;  /* data buffer */
-    DATATYPE *data_array = NULL;   /* data buffer */
-    hsize_t chunk_dims[RANK];      /* chunk sizes */
-    hid_t dataset_pl;              /* dataset create prop. list */
+    hsize_t     dims[RANK];         /* dataset dim sizes */
+    DATATYPE   *data_origin = NULL; /* data buffer */
+    DATATYPE   *data_array  = NULL; /* data buffer */
+    hsize_t     chunk_dims[RANK];   /* chunk sizes */
+    hid_t       dataset_pl;         /* dataset create prop. list */

-    hsize_t start[RANK];      /* for hyperslab setting */
-    hsize_t count[RANK];      /* for hyperslab setting */
-    hsize_t stride[RANK];     /* for hyperslab setting */
-    hsize_t block[RANK];      /* for hyperslab setting */
-    hsize_t mstart[RANK];     /* for data buffer in memory */
+    hsize_t start[RANK];  /* for hyperslab setting */
+    hsize_t count[RANK];  /* for hyperslab setting */
+    hsize_t stride[RANK]; /* for hyperslab setting */
+    hsize_t block[RANK];  /* for hyperslab setting */
+    hsize_t mstart[RANK]; /* for data buffer in memory */

-    herr_t ret;               /* Generic return value */
-    int mpi_size, mpi_rank;
+    herr_t ret; /* Generic return value */
+    int    mpi_size, mpi_rank;

    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;

    filename = GetTestParameters();
-    if(VERBOSE_MED)
-        printf("Extend independent write test on file %s\n", filename);
+    if (VERBOSE_MED)
+        HDprintf("Extend independent write test on file %s\n", filename);

    /* set up MPI parameters */
-    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
-    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* setup chunk-size. Make sure sizes are > 0 */
-    chunk_dims[0] = chunkdim0;
-    chunk_dims[1] = chunkdim1;
+    chunk_dims[0] = (hsize_t)chunkdim0;
+    chunk_dims[1] = (hsize_t)chunkdim1;

    /* -------------------
     * START AN HDF5 FILE
@@ -2731,17 +2701,17 @@ none_selection_chunk(void)
     * ------------------------------------------------------------- */

    /* set up dataset storage chunk sizes and creation property list */
-    if(VERBOSE_MED)
-        printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+    if (VERBOSE_MED)
+        HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
    dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
    VRFY((ret >= 0), "H5Pset_chunk succeeded");

    /* setup dimensionality object */
-    dims[0] = dim0;
-    dims[1] = dim1;
-    sid = H5Screate_simple(RANK, dims, NULL);
+    dims[0] = (hsize_t)dim0;
+    dims[1] = (hsize_t)dim1;
+    sid     = H5Screate_simple(RANK, dims, NULL);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    /* create an extendible dataset collectively */
@@ -2764,65 +2734,64 @@ none_selection_chunk(void)
    /* allocate memory for data buffer. Only allocate enough buffer for
     * each processor's data. */
*/ - if(mpi_rank) { - data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE)); + if (mpi_rank) { + data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); VRFY((data_origin != NULL), "data_origin HDmalloc succeeded"); - data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE)); + data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE)); VRFY((data_array != NULL), "data_array HDmalloc succeeded"); /* put some trivial data in the data_array */ mstart[0] = mstart[1] = 0; dataset_fill(mstart, block, data_origin); MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(mstart, block, data_origin); + if (VERBOSE_MED) { + MESG("data_array created"); + dataset_print(mstart, block, data_origin); } } /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* Process 0 has no selection */ - if(!mpi_rank) { + if (!mpi_rank) { ret = H5Sselect_none(mem_dataspace); VRFY((ret >= 0), "H5Sselect_none succeeded"); } /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); + file_dataspace = H5Dget_space(dataset1); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* Process 0 has no selection */ - if(!mpi_rank) { + if (!mpi_rank) { ret = H5Sselect_none(file_dataspace); VRFY((ret >= 0), "H5Sselect_none succeeded"); } /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); + xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_origin); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); VRFY((ret >= 0), "H5Dwrite succeeded"); /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array); + ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); VRFY((ret >= 0), ""); /* verify the read data with original expected data */ - if(mpi_rank) { + if (mpi_rank) { ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if(ret) nerrors++; + if (ret) + nerrors++; } /* ------------------------- @@ -2832,19 +2801,18 @@ none_selection_chunk(void) VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); /* write data collectively */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_origin); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin); VRFY((ret >= 0), "H5Dwrite succeeded"); /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array); + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array); VRFY((ret >= 0), ""); /* verify the read data with original expected data */ - if(mpi_rank) { + if (mpi_rank) { ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if(ret) nerrors++; + if (ret) + nerrors++; } /* release resource */ @@ 
-2855,7 +2823,6 @@ none_selection_chunk(void) ret = H5Pclose(xfer_plist); VRFY((ret >= 0), "H5Pclose succeeded"); - /* close dataset collectively */ ret = H5Dclose(dataset1); VRFY((ret >= 0), "H5Dclose1 succeeded"); @@ -2866,24 +2833,25 @@ none_selection_chunk(void) H5Fclose(fid); /* release data buffers */ - if(data_origin) HDfree(data_origin); - if(data_array) HDfree(data_array); + if (data_origin) + HDfree(data_origin); + if (data_array) + HDfree(data_array); } - /* Function: test_actual_io_mode * - * Purpose: tests one specific case of collective I/O and checks that the + * Purpose: tests one specific case of collective I/O and checks that the * actual_chunk_opt_mode property and the actual_io_mode * properties in the DXPL have the correct values. * * Input: selection_mode: changes the way processes select data from the space, as well * as some dxpl flags to get collective I/O to break in different ways. - * + * * The relevant I/O function and expected response for each mode: * TEST_ACTUAL_IO_MULTI_CHUNK_IND: * H5D_mpi_chunk_collective_io, each process reports independent I/O - * + * * TEST_ACTUAL_IO_MULTI_CHUNK_COL: * H5D_mpi_chunk_collective_io, each process reports collective I/O * @@ -2895,7 +2863,7 @@ none_selection_chunk(void) * collective, the rest report independent I/O * * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND. + * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND. * Go directly to multi-chunk-io without the num threshold calc. * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL. @@ -2912,7 +2880,7 @@ none_selection_chunk(void) * Simple independent I/O. This tests that the defaults are properly set. * * TEST_ACTUAL_IO_RESET: - * Perfroms collective and then independent I/O wit hthe same dxpl to + * Performs collective and then independent I/O with the same dxpl to * make sure the property is correctly reset to the default on each use. * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE * (The most complex case that works on all builds) and then performs * * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE * are not needed as they are covered by DIRECT_CHUNK_MIX and - * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing - * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO insted of num-threshold. + * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing + * pathway to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO instead of num-threshold. 
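 *
 * As a sketch (not code from this test; dxpl and mpi_size are assumed
 * to be in scope), the two routes to multi-chunk I/O are requested on
 * a transfer property list roughly like so:
 *
 *     H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
 *     // threshold route: a per-chunk process threshold of 2*mpi_size
 *     // can never be met, which rules out link-chunk I/O
 *     H5Pset_dxpl_mpio_chunk_opt_num(dxpl, (unsigned)mpi_size * 2);
 *     // direct route: skip the threshold calculation entirely
 *     H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
 *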
* * Modification: - * - Refctore to remove multi-chunk-without-opimization test and update for - * testing direct to multi-chunk-io + * - Refactor to remove the multi-chunk-without-optimization test and update for + * testing direct to multi-chunk-io * Programmer: Jonathan Kim * Date: 2012-10-10 * - * + * * Programmer: Jacob Gruber * Date: 2011-04-06 */ -static void -test_actual_io_mode(int selection_mode) { - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_write = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_read = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1; - const char * filename; - const char * test_name; - hbool_t direct_multi_chunk_io; - hbool_t multi_chunk_io; - hbool_t is_chunked; - hbool_t is_collective; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int * buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t mem_space = -1; - hid_t file_space = -1; - hid_t dcpl = -1; - hid_t dxpl_write = -1; - hid_t dxpl_read = -1; - hsize_t dims[RANK]; - hsize_t chunk_dims[RANK]; - hsize_t start[RANK]; - hsize_t stride[RANK]; - hsize_t count[RANK]; - hsize_t block[RANK]; - char message[256]; - herr_t ret; - +static void +test_actual_io_mode(int selection_mode) +{ + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE; + H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + const char *filename; + const char *test_name; + hbool_t direct_multi_chunk_io; + hbool_t multi_chunk_io; + hbool_t is_chunked; + hbool_t is_collective; + int mpi_size = -1; + int mpi_rank = -1; + int length; + int *buffer; + int i; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl = -1; + hid_t mem_space = -1; + hid_t file_space = -1; + hid_t dcpl = -1; + hid_t dxpl_write = -1; + hid_t dxpl_read = -1; + hsize_t dims[RANK]; + hsize_t chunk_dims[RANK]; + hsize_t start[RANK]; + hsize_t stride[RANK]; + hsize_t count[RANK]; + hsize_t block[RANK]; + char message[256]; + herr_t ret; + /* Set up some flags to make some future if statements slightly more readable */ - direct_multi_chunk_io = ( - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL ); - + direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then * tests independent I/O */ - multi_chunk_io = ( - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || - selection_mode == 
TEST_ACTUAL_IO_RESET ); - - is_chunked = ( - selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && - selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); - + multi_chunk_io = + (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || + selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET); + + is_chunked = + (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); + is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; /* Set up MPI parameters */ @@ -2999,7 +2965,7 @@ test_actual_io_mode(int selection_mode) { MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Barrier(MPI_COMM_WORLD); - + HDassert(mpi_size >= 1); mpi_comm = MPI_COMM_WORLD; @@ -3016,10 +2982,10 @@ test_actual_io_mode(int selection_mode) { fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); VRFY((fid >= 0), "H5Fcreate succeeded"); - /* Create the basic Space */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (RANK, dims, NULL); + /* Create the basic Space */ + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); /* Create the dataset creation plist */ @@ -3027,27 +2993,26 @@ test_actual_io_mode(int selection_mode) { VRFY((dcpl >= 0), "dataset creation plist created successfully"); /* If we are not testing contiguous datasets */ - if(is_chunked) { + if (is_chunked) { /* Set up chunk information. */ - chunk_dims[0] = dims[0]/mpi_size; + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); } /* Create the dataset */ - dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, - dcpl, H5P_DEFAULT); + dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); /* Create the file dataspace */ file_space = H5Dget_space(dataset); VRFY((file_space >= 0), "H5Dget_space succeeded"); - /* Choose a selection method based on the type of I/O we want to occur, + /* Choose a selection method based on the type of I/O we want to occur, * and also set up some selection-dependent test info. */ - switch(selection_mode) { - + switch (selection_mode) { + /* Independent I/O with optimization */ case TEST_ACTUAL_IO_MULTI_CHUNK_IND: case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: @@ -3056,10 +3021,10 @@ test_actual_io_mode(int selection_mode) { * independent. */ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Multi Chunk - Independent"; + + test_name = "Multi Chunk - Independent"; actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; + actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; break; /* Collective I/O with optimization */ @@ -3070,51 +3035,52 @@ test_actual_io_mode(int selection_mode) { * selections to each chunk, the operation is purely collective. 
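 *
 * Worked example (hypothetical sizes): with mpi_size = 4 and chunks of
 * (dim0/4 x dim1), every chunk spans all columns, so the BYCOL column
 * strip selected by each rank intersects every chunk. All four ranks
 * then take part in every chunk's I/O and each chunk is handled
 * collectively.
 *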
*/ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - test_name = "Multi Chunk - Collective"; + + test_name = "Multi Chunk - Collective"; actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if(mpi_size > 1) + if (mpi_size > 1) actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; else actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; break; - + /* Mixed I/O with optimization */ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: /* A chunk will be assigned collective I/O only if it is selected by each * process. To get mixed I/O, have the root select all chunks and each * subsequent process select the first and nth chunk. The first chunk, * accessed by all, will be assigned collective I/O while each other chunk - * will be accessed only by the root and the nth procecess and will be + * will be accessed only by the root and the nth process and will be * assigned independent I/O. Each process will access one chunk collectively * and at least one chunk independently, reporting mixed I/O. */ - - if(mpi_rank == 0) { - /* Select the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - } else { + + if (mpi_rank == 0) { + /* Select the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + } + else { /* Select the first and the nth chunk in the nth column */ - block[0] = dim0 / mpi_size; - block[1] = dim1 / mpi_size; - count[0] = 2; - count[1] = 1; - stride[0] = mpi_rank * block[0]; + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; stride[1] = 1; - start[0] = 0; - start[1] = mpi_rank*block[1]; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; } - - test_name = "Multi Chunk - Mixed"; + + test_name = "Multi Chunk - Mixed"; actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; + actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; break; /* RESET tests that the properties are properly reset to defaults each time I/O is - * performed. To acheive this, we have RESET perform collective I/O (which would change + * performed. To achieve this, we have RESET perform collective I/O (which would change * the values from the defaults) followed by independent I/O (which should report the * default values). RESET doesn't need to have a unique selection, so we reuse - * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works + * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works * on all builds. The independent section of RESET can be found at the end of this function. */ case TEST_ACTUAL_IO_RESET: @@ -3123,55 +3089,56 @@ test_actual_io_mode(int selection_mode) { case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: /* A chunk will be assigned collective I/O only if it is selected by each * process. To get mixed I/O with disagreement, assign process n to the - * first chunk and the nth chunk. The first chunk, selected by all, is + * first chunk and the nth chunk. The first chunk, selected by all, is * assigned collective I/O, while each other process gets independent I/O. * Since the root process will only access the first chunk, it will report * collective I/O. The subsequent processes will access the first chunk - * collectively, and their other chunk indpendently, reporting mixed I/O. + * collectively, and their other chunk independently, reporting mixed I/O. 
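 *
 * Concretely (assuming mpi_size = 3): rank 0 selects only the first
 * chunk, which every rank selected, so it reports
 * H5D_MPIO_CHUNK_COLLECTIVE; ranks 1 and 2 each also select a chunk
 * that nobody else touches, so they report H5D_MPIO_CHUNK_MIXED,
 * matching the per-rank expectations set below.
 *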
*/ - if(mpi_rank == 0) { - /* Select the first chunk in the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - block[0] = block[0] / mpi_size; - } else { + if (mpi_rank == 0) { + /* Select the first chunk in the first column */ + slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); + block[0] = block[0] / (hsize_t)mpi_size; + } + else { /* Select the first and the nth chunk in the nth column */ - block[0] = dim0 / mpi_size; - block[1] = dim1 / mpi_size; - count[0] = 2; - count[1] = 1; - stride[0] = mpi_rank * block[0]; + block[0] = (hsize_t)(dim0 / mpi_size); + block[1] = (hsize_t)(dim1 / mpi_size); + count[0] = 2; + count[1] = 1; + stride[0] = (hsize_t)mpi_rank * block[0]; stride[1] = 1; - start[0] = 0; - start[1] = mpi_rank*block[1]; + start[0] = 0; + start[1] = (hsize_t)mpi_rank * block[1]; } - + /* If the test name was not already set by the RESET case */ if (selection_mode == TEST_ACTUAL_IO_RESET) test_name = "RESET"; else test_name = "Multi Chunk - Mixed (Disagreement)"; - + actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if(mpi_size > 1) { - if(mpi_rank == 0) + if (mpi_size > 1) { + if (mpi_rank == 0) actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; else actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; } else actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - - break; + + break; /* Linked Chunk I/O */ - case TEST_ACTUAL_IO_LINK_CHUNK: + case TEST_ACTUAL_IO_LINK_CHUNK: /* Nothing special; link chunk I/O is forced in the dxpl settings. */ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Link Chunk"; + + test_name = "Link Chunk"; actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; + actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; break; /* Contiguous Dataset */ @@ -3179,36 +3146,36 @@ test_actual_io_mode(int selection_mode) { /* A non-overlapping, regular selection in a contiguous dataset leads to * collective I/O */ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Contiguous"; + + test_name = "Contiguous"; actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; + actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; break; case TEST_ACTUAL_IO_NO_COLLECTIVE: slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Independent"; + + test_name = "Independent"; actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; break; default: - test_name = "Undefined Selection Mode"; - actual_chunk_opt_mode_expected = -1; - actual_io_mode_expected = -1; + test_name = "Undefined Selection Mode"; + actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; + actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; break; } ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - + /* Create a memory dataspace mirroring the dataset and select the same hyperslab - * as in the file space. + * as in the file space. 
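 *
 * (The memory and file selections must describe the same number of
 * elements or the transfer fails; when in doubt, comparing
 * H5Sget_select_npoints() on the two dataspaces is a quick sanity
 * check.)
 *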
*/ - mem_space = H5Screate_simple (RANK, dims, NULL); + mem_space = H5Screate_simple(RANK, dims, NULL); VRFY((mem_space >= 0), "mem_space created"); - + ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); @@ -3216,39 +3183,39 @@ test_actual_io_mode(int selection_mode) { length = dim0 * dim1; /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) buffer[i] = i; /* Set up the dxpl for the write */ dxpl_write = H5Pcreate(H5P_DATASET_XFER); VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - + /* Set collective I/O properties in the dxpl. */ - if(is_collective) { + if (is_collective) { /* Request collective I/O */ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Set the threshold number of processes per chunk to twice mpi_size. - * This will prevent the threshold from ever being met, thus forcing + + /* Set the threshold number of processes per chunk to twice mpi_size. + * This will prevent the threshold from ever being met, thus forcing * multi chunk io instead of link chunk io. - * This is via deault. + * This is the default route. */ - if(multi_chunk_io) { + if (multi_chunk_io) { /* force multi-chunk-io by threshold */ - ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2); + ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2); VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); - /* set this to manipulate testing senario about allocating processes + /* set this to manipulate the testing scenario of allocating processes * to chunks */ - ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99); + ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99); VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); } /* Go directly to multi-chunk-io without the threshold calc. 
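 *
 * (H5FD_MPIO_CHUNK_MULTI_IO forces the multi-chunk path outright; its
 * counterpart H5FD_MPIO_CHUNK_ONE_IO forces link-chunk I/O instead.)
 *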
*/ - if(direct_multi_chunk_io) { + if (direct_multi_chunk_io) { /* set for multi chunk io by property*/ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); @@ -3261,46 +3228,51 @@ test_actual_io_mode(int selection_mode) { /* Write */ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - /* Retreive Actual io valuess */ + /* Retrieve Actual io values */ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retriving actual io mode suceeded" ); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" ); - + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + /* Read */ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Retreive Actual io values */ + + /* Retrieve Actual io values */ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retriving actual io mode succeeded" ); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" ); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); /* Check write vs read */ VRFY((actual_io_mode_read == actual_io_mode_write), - "reading and writing are the same for actual_io_mode"); + "reading and writing are the same for actual_io_mode"); VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), - "reading and writing are the same for actual_chunk_opt_mode"); + "reading and writing are the same for actual_chunk_opt_mode"); /* Test values */ - if(actual_chunk_opt_mode_expected != (unsigned) -1 && actual_io_mode_expected != (unsigned) -1) { - sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name); + if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 && + actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) { + HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n", + test_name); VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); - sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name); + HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name); VRFY((actual_io_mode_write == actual_io_mode_expected), message); - } else { - HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, - actual_chunk_opt_mode_write, actual_io_mode_write); + } + else { + HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write, + actual_io_mode_write); } - /* To test that the property is succesfully reset to the default, we perform some + /* To test that the property is successfully reset to the default, we perform some * independent I/O after the collective I/O */ if (selection_mode == TEST_ACTUAL_IO_RESET) { @@ -3317,110 +3289,123 @@ test_actual_io_mode(int selection_mode) { /* Check Properties */ ret = 
H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY( (ret >= 0), "retriving actual io mode succeeded" ); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" ); + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset write (independent)"); + "actual_chunk_opt_mode has correct value for reset write (independent)"); VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset write (independent)"); - + "actual_io_mode has correct value for reset write (independent)"); + /* Read */ ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); /* Check Properties */ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY( (ret >= 0), "retriving actual io mode succeeded" ); + VRFY((ret >= 0), "retrieving actual io mode succeeded"); ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" ); - + VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded"); + VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset read (independent)"); + "actual_chunk_opt_mode has correct value for reset read (independent)"); VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset read (independent)"); - } + "actual_io_mode has correct value for reset read (independent)"); + } } /* Release some resources */ ret = H5Sclose(sid); + VRFY((ret >= 0), "H5Sclose succeeded"); ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); ret = H5Pclose(dcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); ret = H5Pclose(dxpl_write); + VRFY((ret >= 0), "H5Pclose succeeded"); ret = H5Pclose(dxpl_read); + VRFY((ret >= 0), "H5Pclose succeeded"); ret = H5Dclose(dataset); + VRFY((ret >= 0), "H5Dclose succeeded"); ret = H5Sclose(mem_space); + VRFY((ret >= 0), "H5Sclose succeeded"); ret = H5Sclose(file_space); + VRFY((ret >= 0), "H5Sclose succeeded"); ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); HDfree(buffer); return; } - /* Function: actual_io_mode_tests * - * Purpose: Tests all possible cases of the actual_io_mode property. + * Purpose: Tests all possible cases of the actual_io_mode property. * * Programmer: Jacob Gruber * Date: 2011-04-06 */ void -actual_io_mode_tests(void) { +actual_io_mode_tests(void) +{ int mpi_size = -1; - int mpi_rank = -1; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_size(MPI_COMM_WORLD, &mpi_rank); - - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); - - /* - * Test multi-chunk-io via proc_num threshold - */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); - - /* The Multi Chunk Mixed test requires atleast three processes. 
*/ - if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); - else - HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n"); - - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); - - /* - * Test multi-chunk-io via setting direct property - */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); - - test_actual_io_mode(TEST_ACTUAL_IO_RESET); + /* Only run these tests if selection I/O is not being used - selection I/O + * bypasses this IO mode decision - it's effectively always multi chunk + * currently */ + if (!H5_use_selection_io_g) { + test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + + /* + * Test multi-chunk-io via proc_num threshold + */ + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + + /* The Multi Chunk Mixed test requires at least three processes. */ + if (mpi_size > 2) + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); + else + HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); + + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); + + /* + * Test multi-chunk-io via setting direct property + */ + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + + test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); + test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + + test_actual_io_mode(TEST_ACTUAL_IO_RESET); + } + return; } -/* +/* * Function: test_no_collective_cause_mode * - * Purpose: - * tests cases for broken collective I/O and checks that the + * Purpose: + * tests cases for broken collective I/O and checks that the * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values. * - * Input: + * Input: * selection_mode: various modes to cause broken collective I/O * Note: Originally, each TEST case is supposed to be used alone. * After some discussion, this is updated to take multiple TEST cases - * with '|'. However there is no error check for any of combined + * with '|'. However, there is no error check for any of the combined * test cases, so the tester is responsible for understanding and feeding a * proper combination of TESTs if needed. * - * + * * TEST_COLLECTIVE: * Test for regular collective I/O without cause of breaking. * Just to test normal behavior. - * + * * TEST_SET_INDEPENDENT: * Test for Independent I/O as the cause of breaking collective I/O. * @@ -3428,68 +3413,60 @@ actual_io_mode_tests(void) { * Test for Data Type Conversion as the cause of breaking collective I/O. * * TEST_DATA_TRANSFORMS: - * Test for Data Transfrom feature as the cause of breaking collective I/O. + * Test for Data Transform feature as the cause of breaking collective I/O. * * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES: * Test for NULL dataspace as the cause of breaking collective I/O. - * + * * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT: * Test for Compact layout as the cause of breaking collective I/O. * * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL: * Test for External-File storage as the cause of breaking collective I/O. * - * TEST_FILTERS: - * Test for using filter (checksum) as the cause of breaking collective I/O. - * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead. 
- * - * * Programmer: Jonathan Kim * Date: Aug, 2012 */ +#ifdef LATER #define DSET_NOCOLCAUSE "nocolcause" -#define NELM 2 +#endif #define FILE_EXTERNAL "nocolcause_extern.data" -static void -test_no_collective_cause_mode(int selection_mode) +static void +test_no_collective_cause_mode(int selection_mode) { - uint32_t no_collective_cause_local_write = 0; - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_write = 0; - uint32_t no_collective_cause_global_read = 0; + uint32_t no_collective_cause_local_write = 0; + uint32_t no_collective_cause_local_read = 0; + uint32_t no_collective_cause_local_expected = 0; + uint32_t no_collective_cause_global_write = 0; + uint32_t no_collective_cause_global_read = 0; uint32_t no_collective_cause_global_expected = 0; - hsize_t coord[NELM][RANK]; - - const char * filename; - const char * test_name; - hbool_t is_chunked=1; - hbool_t is_independent=0; - int mpi_size = -1; - int mpi_rank = -1; + + const char *filename; + const char *test_name; + hbool_t is_chunked = 1; + hbool_t is_independent = 0; + int mpi_size = -1; + int mpi_rank = -1; int length; - int * buffer; + int *buffer; int i; MPI_Comm mpi_comm; MPI_Info mpi_info; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t dcpl = -1; + hid_t fid = -1; + hid_t sid = -1; + hid_t dataset = -1; + hid_t data_type = H5T_NATIVE_INT; + hid_t fapl = -1; + hid_t dcpl = -1; hid_t dxpl_write = -1; - hid_t dxpl_read = -1; + hid_t dxpl_read = -1; hsize_t dims[RANK]; - hid_t mem_space = -1; + hid_t mem_space = -1; hid_t file_space = -1; hsize_t chunk_dims[RANK]; herr_t ret; -#ifdef LATER /* fletcher32 */ - H5Z_filter_t filter_info; -#endif /* LATER */ /* set to global value as default */ - int l_facc_type = facc_type; + int l_facc_type = facc_type; char message[256]; /* Set up MPI parameters */ @@ -3508,30 +3485,17 @@ test_no_collective_cause_mode(int selection_mode) VRFY((dcpl >= 0), "dataset creation plist created successfully"); if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - ret = H5Pset_layout (dcpl, H5D_COMPACT); - VRFY((ret >= 0),"set COMPACT layout succeeded"); + ret = H5Pset_layout(dcpl, H5D_COMPACT); + VRFY((ret >= 0), "set COMPACT layout succeeded"); is_chunked = 0; } if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED); - VRFY((ret >= 0),"set EXTERNAL file layout succeeded"); + ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED); + VRFY((ret >= 0), "set EXTERNAL file layout succeeded"); is_chunked = 0; } -#ifdef LATER /* fletcher32 */ - if (selection_mode & TEST_FILTERS) { - ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32); - VRFY ((ret >=0 ), "Fletcher32 filter is available.\n"); - - ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info); - VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n"); - - ret = H5Pset_fletcher32(dcpl); - VRFY((ret >= 0),"set filter (flecher32) succeeded"); - } -#endif /* LATER */ - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { sid = H5Screate(H5S_NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); @@ -3545,13 +3509,12 @@ test_no_collective_cause_mode(int selection_mode) dims[1] = COL_FACTOR * 6; } else { - dims[0] = dim0; - dims[1] = dim1; + dims[0] = (hsize_t)dim0; + 
dims[1] = (hsize_t)dim1; } - sid = H5Screate_simple (RANK, dims, NULL); + sid = H5Screate_simple(RANK, dims, NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); } - filename = (const char *)GetTestParameters(); HDassert(filename != NULL); @@ -3566,34 +3529,31 @@ test_no_collective_cause_mode(int selection_mode) VRFY((fid >= 0), "H5Fcreate succeeded"); /* If we are not testing contiguous datasets */ - if(is_chunked) { + if (is_chunked) { /* Set up chunk information. */ - chunk_dims[0] = dims[0]/mpi_size; + chunk_dims[0] = dims[0] / (hsize_t)mpi_size; chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); + ret = H5Pset_chunk(dcpl, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); } - /* Create the dataset */ - dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, - dcpl, H5P_DEFAULT); + dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* - * Set expected causes and some tweaks based on the type of test + /* + * Set expected causes and some tweaks based on the type of test */ if (selection_mode & TEST_DATATYPE_CONVERSION) { test_name = "Broken Collective I/O - Datatype Conversion"; no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; /* set different sign to trigger type conversion */ - data_type = H5T_NATIVE_UINT; + data_type = H5T_NATIVE_UINT; } if (selection_mode & TEST_DATA_TRANSFORMS) { - test_name = "Broken Collective I/O - DATA Transfroms"; + test_name = "Broken Collective I/O - DATA Transforms"; no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; } @@ -3611,23 +3571,15 @@ test_no_collective_cause_mode(int selection_mode) no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; } -#ifdef LATER /* fletcher32 */ - if (selection_mode & TEST_FILTERS) { - test_name = "Broken Collective I/O - Filter is required"; - no_collective_cause_local_expected |= H5D_MPIO_FILTERS; - no_collective_cause_global_expected |= H5D_MPIO_FILTERS; - } -#endif /* LATER */ - if (selection_mode & TEST_COLLECTIVE) { - test_name = "Broken Collective I/O - Not Broken"; - no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; + test_name = "Broken Collective I/O - Not Broken"; + no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; } if (selection_mode & TEST_SET_INDEPENDENT) { - test_name = "Broken Collective I/O - Independent"; - no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; + test_name = "Broken Collective I/O - Independent"; + no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; /* switch to independent io */ is_independent = 1; @@ -3637,7 +3589,7 @@ test_no_collective_cause_mode(int selection_mode) if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { file_space = H5S_ALL; - mem_space = H5S_ALL; + mem_space = H5S_ALL; } else { /* Get the file dataspace */ @@ -3645,24 +3597,24 @@ test_no_collective_cause_mode(int selection_mode) VRFY((file_space >= 0), "H5Dget_space succeeded"); /* Create the memory dataspace */ - mem_space = H5Screate_simple (RANK, dims, NULL); + mem_space = 
H5Screate_simple(RANK, dims, NULL); VRFY((mem_space >= 0), "mem_space created"); } /* Get the number of elements in the selection */ - length = dims[0] * dims[1]; + H5_CHECKED_ASSIGN(length, int, dims[0] * dims[1], uint64_t); /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) + buffer = (int *)HDmalloc(sizeof(int) * (size_t)length); + VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); + for (i = 0; i < length; i++) buffer[i] = i; /* Set up the dxpl for the write */ dxpl_write = H5Pcreate(H5P_DATASET_XFER); VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if(is_independent) { + + if (is_independent) { /* Set Independent I/O */ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); @@ -3671,32 +3623,31 @@ test_no_collective_cause_mode(int selection_mode) /* Set Collective I/O */ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } if (selection_mode & TEST_DATA_TRANSFORMS) { - ret = H5Pset_data_transform (dxpl_write, "x+1"); + ret = H5Pset_data_transform(dxpl_write, "x+1"); VRFY((ret >= 0), "H5Pset_data_transform succeeded"); } /*--------------------- * Test Write access - *---------------------*/ + *---------------------*/ /* Write */ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write); - VRFY((ret >= 0), "retriving no collective cause succeeded" ); - + ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write, + &no_collective_cause_global_write); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); /*--------------------- * Test Read access - *---------------------*/ + *---------------------*/ /* Make a copy of the dxpl to test the read operation */ dxpl_read = H5Pcopy(dxpl_write); @@ -3705,25 +3656,29 @@ test_no_collective_cause_mode(int selection_mode) /* Read */ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stdout); VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - + /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read); - VRFY((ret >= 0), "retriving no collective cause succeeded" ); + ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read, + &no_collective_cause_global_read); + VRFY((ret >= 0), "retrieving no collective cause succeeded"); /* Check write vs read */ VRFY((no_collective_cause_local_read == no_collective_cause_local_write), - "reading and writing are the same for local cause of Broken Collective I/O"); + "reading and writing are the same for local cause of Broken Collective I/O"); VRFY((no_collective_cause_global_read == no_collective_cause_global_write), - "reading and writing are the same for global cause of Broken Collective I/O"); - + "reading and writing are the same for global cause of Broken Collective I/O"); + /* Test values */ - memset (message, 0, 
sizeof (message)); - sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name); + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name); VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message); - memset (message, 0, sizeof (message)); - sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name); + HDmemset(message, 0, sizeof(message)); + HDsnprintf(message, sizeof(message), + "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name); VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message); /* Release some resources */ @@ -3754,274 +3709,34 @@ test_no_collective_cause_mode(int selection_mode) return; } - -/* - * Function: test_no_collective_cause_mode_filter - * - * Purpose: - * Test specific for using filter as a caus of broken collective I/O and - * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL - * have the correct values. - * - * NOTE: - * This is a temporary function. - * test_no_collective_cause_mode(TEST_FILTERS) will replace this when - * H5Dcreate and H5write support for mpio and filter feature. - * - * Input: - * TEST_FILTERS_READ: - * Test for using filter (checksum) as the cause of breaking collective I/O. - * - * Programmer: Jonathan Kim - * Date: Aug, 2012 - */ -static void -test_no_collective_cause_mode_filter(int selection_mode) -{ - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_read = 0; - uint32_t no_collective_cause_global_expected = 0; - - const char * filename; - const char * test_name; - hbool_t is_chunked=1; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int * buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl_write = -1; - hid_t fapl_read = -1; - hid_t dcpl = -1; - hid_t dxpl = -1; - hsize_t dims[RANK]; - hid_t mem_space = -1; - hid_t file_space = -1; - hsize_t chunk_dims[RANK]; - herr_t ret; -#ifdef LATER /* fletcher32 */ - H5Z_filter_t filter_info; -#endif /* LATER */ - char message[256]; - - /* Set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - - MPI_Barrier(MPI_COMM_WORLD); - - HDassert(mpi_size >= 1); - - mpi_comm = MPI_COMM_WORLD; - mpi_info = MPI_INFO_NULL; - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - if (selection_mode == TEST_FILTERS_READ ) { -#ifdef LATER /* fletcher32 */ - ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32); - VRFY ((ret >=0 ), "Fletcher32 filter is available.\n"); - - ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info); - VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n"); - - ret = H5Pset_fletcher32(dcpl); - VRFY((ret >= 0),"set filter (flecher32) succeeded"); -#endif /* LATER */ - } - else { - VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ."); - } - - /* Create the basic Space */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (RANK, dims, NULL); - VRFY((sid >= 
0), "H5Screate_simple succeeded"); - - - filename = (const char *)GetTestParameters(); - HDassert(filename != NULL); - - /* Setup the file access template */ - fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT); - VRFY((fapl_write >= 0), "create_faccess_plist() succeeded"); - - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* If we are not testing contiguous datasets */ - if(is_chunked) { - /* Set up chunk information. */ - chunk_dims[0] = dims[0]/mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); - } - - - /* Create the dataset */ - dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, - dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - -#ifdef LATER /* fletcher32 */ - /* Set expected cause */ - test_name = "Broken Collective I/O - Filter is required"; - no_collective_cause_local_expected = H5D_MPIO_FILTERS; - no_collective_cause_global_expected = H5D_MPIO_FILTERS; -#endif /* LATER */ - - /* Get the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Create the memory dataspace */ - mem_space = H5Screate_simple (RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - - /* Get the number of elements in the selection */ - length = dim0 * dim1; - - /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if (selection_mode == TEST_FILTERS_READ) { - /* To test read in collective I/O mode , write in independent mode - * because write fails with mpio + filter */ - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - else { - /* To test write in collective I/O mode. */ - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer); - - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - - /* Make a copy of the dxpl to test the read operation */ - dxpl = H5Pcopy(dxpl); - VRFY((dxpl >= 0), "H5Pcopy succeeded"); - - if (dataset) - H5Dclose(dataset); - if (fapl_write) - H5Pclose(fapl_write); - if (fid) - H5Fclose(fid); - - - /*--------------------- - * Test Read access - *---------------------*/ - - /* Setup the file access template */ - fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((fapl_read >= 0), "create_faccess_plist() succeeded"); - - fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read); - dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT); - - /* Set collective I/O properties in the dxpl. 
*/ - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer); - - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Get the cause of broken collective I/O */ - ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read); - VRFY((ret >= 0), "retriving no collective cause succeeded" ); - - /* Test values */ - memset (message, 0, sizeof (message)); - sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name); - VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message); - memset (message, 0, sizeof (message)); - sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name); - VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message); - - /* Release some resources */ - if (sid) - H5Sclose(sid); - if (fapl_read) - H5Pclose(fapl_read); - if (dcpl) - H5Pclose(dcpl); - if (dxpl) - H5Pclose(dxpl); - if (dataset) - H5Dclose(dataset); - if (mem_space) - H5Sclose(mem_space); - if (file_space) - H5Sclose(file_space); - if (fid) - H5Fclose(fid); - HDfree(buffer); - return; -} - /* Function: no_collective_cause_tests * - * Purpose: Tests cases for broken collective IO. + * Purpose: Tests cases for broken collective IO. * * Programmer: Jonathan Kim * Date: Aug, 2012 */ -void -no_collective_cause_tests(void) +void +no_collective_cause_tests(void) { - /* - * Test individual cause + /* + * Test individual cause */ - test_no_collective_cause_mode (TEST_COLLECTIVE); - test_no_collective_cause_mode (TEST_SET_INDEPENDENT); - test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode (TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); - test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); - test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); -#ifdef LATER /* fletcher32 */ - /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and - * H5Dwrite is ready for mpio + filter feature. 
- */ - /* test_no_collective_cause_mode (TEST_FILTERS); */ - test_no_collective_cause_mode_filter (TEST_FILTERS_READ); -#endif /* LATER */ - - /* - * Test combined causes + test_no_collective_cause_mode(TEST_COLLECTIVE); + test_no_collective_cause_mode(TEST_SET_INDEPENDENT); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL); + + /* + * Test combined causes */ - test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); - test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); - test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION); + test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS); + test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | + TEST_DATA_TRANSFORMS); return; } @@ -4040,48 +3755,49 @@ no_collective_cause_tests(void) void dataset_atomicity(void) { - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t dataset1; /* Dataset IDs */ - hsize_t dims[RANK]; /* dataset dim sizes */ - int *write_buf = NULL; /* data buffer */ - int *read_buf = NULL; /* data buffer */ - int buf_size; - hid_t dataset2; - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* Memory dataspace ID */ - hsize_t start[RANK]; - hsize_t stride[RANK]; - hsize_t count[RANK]; - hsize_t block[RANK]; + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t sid; /* Dataspace ID */ + hid_t dataset1; /* Dataset IDs */ + hsize_t dims[RANK]; /* dataset dim sizes */ + int *write_buf = NULL; /* data buffer */ + int *read_buf = NULL; /* data buffer */ + int buf_size; + hid_t dataset2; + hid_t file_dataspace; /* File dataspace ID */ + hid_t mem_dataspace; /* Memory dataspace ID */ + hsize_t start[RANK]; + hsize_t stride[RANK]; + hsize_t count[RANK]; + hsize_t block[RANK]; const char *filename; - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - int i, j, k; - hbool_t atomicity = FALSE; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - dim0 = 64; dim1 = 32; + herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; + int i, j, k; + hbool_t atomicity = FALSE; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + dim0 = 64; + dim1 = 32; filename = GetTestParameters(); if (facc_type != FACC_MPIO) { - printf("Atomicity tests will not work without the MPIO VFD\n"); + HDprintf("Atomicity tests will not work without the MPIO VFD\n"); return; } - if(VERBOSE_MED) - printf("atomic writes to file %s\n", filename); + if (VERBOSE_MED) + HDprintf("atomic writes to file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); buf_size = dim0 * dim1; /* allocate memory for data buffer */ - write_buf = (int *)HDcalloc(buf_size, sizeof(int)); + write_buf = (int *)HDcalloc((size_t)buf_size, 
sizeof(int)); VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); /* allocate memory for data buffer */ - read_buf = (int *)HDcalloc(buf_size, sizeof(int)); + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); /* setup file access template */ @@ -4097,31 +3813,27 @@ dataset_atomicity(void) VRFY((ret >= 0), "H5Pclose succeeded"); /* setup dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (RANK, dims, NULL); + dims[0] = (hsize_t)dim0; + dims[1] = (hsize_t)dim1; + sid = H5Screate_simple(RANK, dims, NULL); VRFY((sid >= 0), "H5Screate_simple succeeded"); /* create datasets */ - dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); /* initialize datasets to 0s */ if (0 == mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, write_buf); + ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, write_buf); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); } - + ret = H5Dclose(dataset1); VRFY((ret >= 0), "H5Dclose succeeded"); ret = H5Dclose(dataset2); @@ -4131,35 +3843,41 @@ dataset_atomicity(void) ret = H5Fclose(fid); VRFY((ret >= 0), "H5Fclose succeeded"); - MPI_Barrier (comm); + MPI_Barrier(comm); /* make sure setting atomicity fails on a serial file ID */ - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT); - VRFY((fid >= 0), "H5Fopen succeeed"); - - /* should fail */ - ret = H5Fset_mpi_atomicity (fid , TRUE); - VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); + /* file locking allows only one file open (serial) for writing */ + if (MAINPROCESS) { + fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT); + VRFY((fid >= 0), "H5Fopen succeeded"); + + /* should fail */ + H5E_BEGIN_TRY + { + ret = H5Fset_mpi_atomicity(fid, TRUE); + } + H5E_END_TRY + VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } - MPI_Barrier (comm); + MPI_Barrier(comm); /* setup file access template */ acc_tpl = create_faccess_plist(comm, info, facc_type); VRFY((acc_tpl >= 0), ""); /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl); + fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl); VRFY((fid >= 0), "H5Fopen succeeded"); /* Release file-access template */ ret = H5Pclose(acc_tpl); VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Fset_mpi_atomicity (fid , TRUE); + ret = H5Fset_mpi_atomicity(fid, TRUE); VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded"); /* open dataset1 (contiguous case) */ @@ -4167,22 +3885,22 @@ dataset_atomicity(void) VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); if (0 == mpi_rank) { - for (i=0 ; i<buf_size ; i++) { + for (i = 0; i < buf_size; i++) { write_buf[i] = 5; } } else { - for (i=0 ; 
i<buf_size ; i++) { + for (i = 0; i < buf_size; i++) { read_buf[i] = 8; } } /* check that the atomicity flag is set */ - ret = H5Fget_mpi_atomicity (fid , &atomicity); + ret = H5Fget_mpi_atomicity(fid, &atomicity); VRFY((ret >= 0), "atomcity get failed"); VRFY((atomicity == TRUE), "atomcity set failed"); - MPI_Barrier (comm); + MPI_Barrier(comm); /* Process 0 writes contiguously to the entire dataset */ if (0 == mpi_rank) { @@ -4195,27 +3913,30 @@ dataset_atomicity(void) VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); } - if(VERBOSE_MED) { - i=0;j=0;k=0; - for (i=0 ; i<dim0 ; i++) { - printf ("\n"); - for (j=0 ; j<dim1 ; j++) - printf ("%d ", read_buf[k++]); + if (VERBOSE_MED) { + i = 0; + j = 0; + k = 0; + for (i = 0; i < dim0; i++) { + HDprintf("\n"); + for (j = 0; j < dim1; j++) + HDprintf("%d ", read_buf[k++]); } } /* The processes that read the dataset must either read all values as 0 (read happened before process 0 wrote to dataset 1), or 5 (read happened after process 0 wrote to dataset 1) */ - if (0 != mpi_rank) { + if (0 != mpi_rank) { int compare = read_buf[0]; - VRFY((compare == 0 || compare == 5), + VRFY((compare == 0 || compare == 5), "Atomicity Test Failed Process %d: Value read should be 0 or 5\n"); - for (i=1; i<buf_size; i++) { + for (i = 1; i < buf_size; i++) { if (read_buf[i] != compare) { - printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare); - nerrors ++; + HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, + read_buf[i], compare); + nerrors++; } } } @@ -4224,116 +3945,122 @@ dataset_atomicity(void) VRFY((ret >= 0), "H5D close succeeded"); /* release data buffers */ - if(write_buf) HDfree(write_buf); - if(read_buf) HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); /* open dataset2 (non-contiguous case) */ dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT); VRFY((dataset2 >= 0), "H5Dopen2 succeeded"); /* allocate memory for data buffer */ - write_buf = (int *)HDcalloc(buf_size, sizeof(int)); + write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); /* allocate memory for data buffer */ - read_buf = (int *)HDcalloc(buf_size, sizeof(int)); + read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int)); VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); - for (i=0 ; i<buf_size ; i++) { + for (i = 0; i < buf_size; i++) { write_buf[i] = 5; } - for (i=0 ; i<buf_size ; i++) { + for (i = 0; i < buf_size; i++) { read_buf[i] = 8; } atomicity = FALSE; /* check that the atomicity flag is set */ - ret = H5Fget_mpi_atomicity (fid , &atomicity); + ret = H5Fget_mpi_atomicity(fid, &atomicity); VRFY((ret >= 0), "atomcity get failed"); VRFY((atomicity == TRUE), "atomcity set failed"); - - block[0] = dim0/mpi_size - 1; - block[1] = dim1/mpi_size - 1; + block[0] = (hsize_t)(dim0 / mpi_size - 1); + block[1] = (hsize_t)(dim1 / mpi_size - 1); stride[0] = block[0] + 1; stride[1] = block[1] + 1; - count[0] = mpi_size; - count[1] = mpi_size; - start[0] = 0; - start[1] = 0; + count[0] = (hsize_t)mpi_size; + count[1] = (hsize_t)mpi_size; + start[0] = 0; + start[1] = 0; /* create a file dataspace */ - file_dataspace = H5Dget_space (dataset2); + file_dataspace = H5Dget_space(dataset2); VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab 
succeeded"); /* create a memory dataspace */ - mem_dataspace = H5Screate_simple (RANK, dims, NULL); + mem_dataspace = H5Screate_simple(RANK, dims, NULL); VRFY((mem_dataspace >= 0), ""); ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - MPI_Barrier (comm); + MPI_Barrier(comm); /* Process 0 writes to the dataset */ if (0 == mpi_rank) { - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, write_buf); + ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf); VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); } /* All processes wait for the write to finish. This works because atomicity is set to true */ - MPI_Barrier (comm); + MPI_Barrier(comm); /* The other processes read the entire dataset */ if (0 != mpi_rank) { - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, read_buf); + ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf); VRFY((ret >= 0), "H5Dread dataset2 succeeded"); } - if(VERBOSE_MED) { + if (VERBOSE_MED) { if (mpi_rank == 1) { - i=0;j=0;k=0; - for (i=0 ; i<dim0 ; i++) { - printf ("\n"); - for (j=0 ; j<dim1 ; j++) - printf ("%d ", read_buf[k++]); + i = 0; + j = 0; + k = 0; + for (i = 0; i < dim0; i++) { + HDprintf("\n"); + for (j = 0; j < dim1; j++) + HDprintf("%d ", read_buf[k++]); } - printf ("\n"); + HDprintf("\n"); } } /* The processes that read the dataset must either read all values as 5 (read happened after process 0 wrote to dataset 1) */ - if (0 != mpi_rank) { + if (0 != mpi_rank) { int compare; - i=0;j=0;k=0; + i = 0; + j = 0; + k = 0; compare = 5; - for (i=0 ; i<dim0 ; i++) { - if (i >= mpi_rank*(block[0]+1)) { + H5_CHECK_OVERFLOW(block[0], hsize_t, int); + H5_CHECK_OVERFLOW(block[1], hsize_t, int); + for (i = 0; i < dim0; i++) { + if (i >= mpi_rank * ((int)block[0] + 1)) { break; } - if ((i+1)%(block[0]+1)==0) { + if ((i + 1) % ((int)block[0] + 1) == 0) { k += dim1; continue; } - for (j=0 ; j<dim1 ; j++) { - if (j >= mpi_rank*(block[1]+1)) { - k += dim1 - mpi_rank*(block[1]+1); + for (j = 0; j < dim1; j++) { + if (j >= mpi_rank * ((int)block[1] + 1)) { + k += dim1 - mpi_rank * ((int)block[1] + 1); break; } - if ((j+1)%(block[1]+1)==0) { + if ((j + 1) % ((int)block[1] + 1) == 0) { k++; continue; } else if (compare != read_buf[k]) { - printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare); + HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, + k, read_buf[k], compare); nerrors++; } - k ++; + k++; } } } @@ -4346,12 +4073,13 @@ dataset_atomicity(void) VRFY((ret >= 0), "H5Sclose succeeded"); /* release data buffers */ - if(write_buf) HDfree(write_buf); - if(read_buf) HDfree(read_buf); + if (write_buf) + HDfree(write_buf); + if (read_buf) + HDfree(read_buf); ret = H5Fclose(fid); VRFY((ret >= 0), "H5Fclose succeeded"); - } /* Function: dense_attr_test @@ -4361,24 +4089,24 @@ dataset_atomicity(void) * Programmer: Quincey Koziol * Date: April, 2013 */ -void -test_dense_attr(void) +void +test_dense_attr(void) { - int mpi_size, mpi_rank; - hid_t fpid, fid; - hid_t gid, gpid; - hid_t atFileSpace, atid; - hsize_t atDims[1] = {10000}; - herr_t status; + int mpi_size, mpi_rank; + hid_t fpid, fid; + hid_t gid, gpid; + hid_t atFileSpace, atid; + hsize_t atDims[1] = {10000}; + herr_t status; const char *filename; /* get filename */ filename = (const char 
*)GetTestParameters(); - HDassert( filename != NULL ); + HDassert(filename != NULL); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); fpid = H5Pcreate(H5P_FILE_ACCESS); VRFY((fpid > 0), "H5Pcreate succeeded"); @@ -4400,7 +4128,7 @@ test_dense_attr(void) status = H5Pclose(gpid); VRFY((status >= 0), "H5Pclose succeeded"); - atFileSpace = H5Screate_simple(1, atDims, NULL); + atFileSpace = H5Screate_simple(1, atDims, NULL); VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); VRFY((atid > 0), "H5Acreate succeeded"); @@ -4417,4 +4145,3 @@ test_dense_attr(void) return; } - diff --git a/testpar/t_file.c b/testpar/t_file.c index 70ca60e..99ad13c 100644 --- a/testpar/t_file.c +++ b/testpar/t_file.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -19,6 +16,31 @@ #include "testphdf5.h" +#include "H5CXprivate.h" /* API Contexts */ +#include "H5Iprivate.h" +#include "H5PBprivate.h" + +/* + * This file needs to access private information from the H5F package. + */ +#define H5AC_FRIEND /*suppress error about including H5ACpkg */ +#include "H5ACpkg.h" +#define H5C_FRIEND /*suppress error about including H5Cpkg */ +#include "H5Cpkg.h" +#define H5F_FRIEND /*suppress error about including H5Fpkg */ +#define H5F_TESTING +#include "H5Fpkg.h" +#define H5MF_FRIEND /*suppress error about including H5MFpkg */ +#include "H5MFpkg.h" + +#define NUM_DSETS 5 + +int mpi_size, mpi_rank; + +static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy); +static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, + size_t page_buffer_size); + /* * test file access by communicator besides COMM_WORLD. 
* Split COMM_WORLD into two, one (even_comm) contains the original @@ -33,65 +55,949 @@ void test_split_comm_access(void) { - int mpi_size, mpi_rank; - MPI_Comm comm; - MPI_Info info = MPI_INFO_NULL; - int is_old, mrc; - int newrank, newprocs; - hid_t fid; /* file IDs */ - hid_t acc_tpl; /* File access properties */ - herr_t ret; /* generic return value */ + MPI_Comm comm; + MPI_Info info = MPI_INFO_NULL; + int is_old, mrc; + int newrank, newprocs; + hid_t fid; /* file IDs */ + hid_t acc_tpl; /* File access properties */ + herr_t ret; /* generic return value */ const char *filename; filename = (const char *)GetTestParameters(); if (VERBOSE_MED) - printf("Split Communicator access test on file %s\n", - filename); + HDprintf("Split Communicator access test on file %s\n", filename); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); - is_old = mpi_rank%2; - mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm); - VRFY((mrc==MPI_SUCCESS), ""); - MPI_Comm_size(comm,&newprocs); - MPI_Comm_rank(comm,&newrank); - - if (is_old){ - /* odd-rank processes */ - mrc = MPI_Barrier(comm); - VRFY((mrc==MPI_SUCCESS), ""); - }else{ - /* even-rank processes */ - int sub_mpi_rank; /* rank in the sub-comm */ - MPI_Comm_rank(comm,&sub_mpi_rank); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret=H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* close the file */ - ret=H5Fclose(fid); - VRFY((ret >= 0), ""); - - /* delete the test file */ - if (sub_mpi_rank == 0){ - mrc = MPI_File_delete((char *)filename, info); - /*VRFY((mrc==MPI_SUCCESS), ""); */ - } + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + is_old = mpi_rank % 2; + mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), ""); + MPI_Comm_size(comm, &newprocs); + MPI_Comm_rank(comm, &newrank); + + if (is_old) { + /* odd-rank processes */ + mrc = MPI_Barrier(comm); + VRFY((mrc == MPI_SUCCESS), ""); + } + else { + /* even-rank processes */ + int sub_mpi_rank; /* rank in the sub-comm */ + MPI_Comm_rank(comm, &sub_mpi_rank); + + /* setup file access template */ + acc_tpl = create_faccess_plist(comm, info, facc_type); + VRFY((acc_tpl >= 0), ""); + + /* create the file collectively */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + /* Release file-access template */ + ret = H5Pclose(acc_tpl); + VRFY((ret >= 0), ""); + + /* close the file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), ""); + + /* delete the test file */ + if (sub_mpi_rank == 0) { + mrc = MPI_File_delete(filename, info); + /*VRFY((mrc==MPI_SUCCESS), ""); */ + } } mrc = MPI_Comm_free(&comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free succeeded"); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded"); mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "final MPI_Barrier succeeded"); + VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded"); +} + +void +test_page_buffer_access(void) +{ + hid_t file_id = -1; /* File ID */ + hid_t fcpl, fapl; + size_t page_count = 0; + int i, num_elements = 200; + haddr_t raw_addr, meta_addr; + int *data; + H5F_t *f = NULL; + herr_t ret; /* generic return value */ + const char *filename; + 
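
The communicator handling in test_split_comm_access() above boils down to the following pattern. This is a sketch under the assumption of an MPI-launched program; the helper name and file name are hypothetical.

#include <mpi.h>
#include <hdf5.h>

/* Sketch: split MPI_COMM_WORLD by rank parity and let the even half
 * create a file collectively on its sub-communicator. */
static void split_comm_sketch(void)
{
    MPI_Comm comm;
    int      rank, is_odd;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    is_odd = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, is_odd, rank, &comm);

    if (!is_odd) { /* even ranks create a file together */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t fid;

        H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
        fid = H5Fcreate("split_comm.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
        H5Fclose(fid);
        H5Pclose(fapl);
    }
    else {
        MPI_Barrier(comm); /* odd ranks only synchronize */
    }
    MPI_Comm_free(&comm);
}
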
hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + filename = (const char *)GetTestParameters(); + + if (VERBOSE_MED) + HDprintf("Page Buffer Usage in Parallel %s\n", filename); + + fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl >= 0), "create_faccess_plist succeeded"); + fcpl = H5Pcreate(H5P_FILE_CREATE); + VRFY((fcpl >= 0), ""); + + ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0); + VRFY((ret == 0), ""); + ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128); + VRFY((ret == 0), ""); + ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0); + VRFY((ret == 0), ""); + + /* This should fail because collective metadata writes are not supported with page buffering */ + H5E_BEGIN_TRY + { + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + } + H5E_END_TRY; + VRFY((file_id < 0), "H5Fcreate failed"); + + /* disable collective metadata writes for page buffering to work */ + ret = H5Pset_coll_metadata_write(fapl, FALSE); + VRFY((ret >= 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED); + VRFY((ret == 0), ""); + ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100, + sizeof(int) * 100000); + VRFY((ret == 0), ""); + + ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY); + VRFY((ret == 0), ""); + ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100, + sizeof(int) * 100000); + VRFY((ret == 0), ""); + + ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100); + VRFY((ret == 0), ""); + + data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements); + + /* initialize all the elements to have a value of -1 */ + for (i = 0; i < num_elements; i++) + data[i] = -1; + if (MAINPROCESS) { + hid_t fapl_self = H5I_INVALID_HID; + fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type); + + ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0); + VRFY((ret == 0), ""); + /* collective metadata writes do not work with page buffering */ + ret = H5Pset_coll_metadata_write(fapl_self, FALSE); + VRFY((ret >= 0), ""); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + /* Get a pointer to the internal file object */ + f = (H5F_t *)H5I_object(file_id); + + VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); + + /* allocate space for 200 raw elements */ + raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements); + VRFY((raw_addr != HADDR_UNDEF), ""); + + /* allocate space for 200 metadata elements */ + meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); + VRFY((meta_addr != HADDR_UNDEF), ""); + + page_count = 0; + + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the first 50 elements */ + for (i = 0; i < 50; i++) + data[i] 
= i; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + page_count += 2; + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the second 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i + 50; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update 100 - 200 */ + for (i = 0; i < 100; i++) + data[i] = i + 100; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + ret = H5PB_flush(f->shared); + VRFY((ret == 0), ""); + + /* read elements 0 - 200 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* close the file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + ret = H5Pclose(fapl_self); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + } + + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_size > 1) { + ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0); + VRFY((ret == 0), ""); + /* collective metadata writes do not work with page buffering */ + ret = H5Pset_coll_metadata_write(fapl, FALSE); + VRFY((ret >= 0), ""); + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + /* Get a pointer to the internal file object */ + f = (H5F_t *)H5I_object(file_id); + + VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process"); + + /* allocate space for 200 raw elements */ + raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, 
sizeof(int) * (size_t)num_elements); + VRFY((raw_addr != HADDR_UNDEF), ""); + /* allocate space for 200 metadata elements */ + meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements); + VRFY((meta_addr != HADDR_UNDEF), ""); + + page_count = 0; + + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data); + VRFY((ret == 0), ""); + + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the first 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update the second 50 elements */ + for (i = 0; i < 50; i++) + data[i] = i + 50; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* update 100 - 200 */ + for (i = 0; i < 100; i++) + data[i] = i + 100; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + /* read elements 0 - 200 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 200; i++) + VRFY((data[i] == i), "Read different values than written"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + page_count += 1; + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == i), "Read different values than written"); + + MPI_Barrier(MPI_COMM_WORLD); + /* reset the first 50 elements to -1*/ + for (i = 0; i < 50; i++) + data[i] = -1; + ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, 
data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + + /* read elements 0 - 50 */ + ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == -1), "Read different values than written"); + ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data); + VRFY((ret == 0), ""); + VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB"); + for (i = 0; i < 50; i++) + VRFY((data[i] == -1), "Read different values than written"); + + /* close the file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + HDfree(data); + data = NULL; + MPI_Barrier(MPI_COMM_WORLD); +} + +static int +create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy) +{ + hid_t file_id, dset_id, grp_id; + hid_t sid, mem_dataspace; + hsize_t start[RANK]; + hsize_t count[RANK]; + hsize_t stride[RANK]; + hsize_t block[RANK]; + DATATYPE *data_array = NULL; + hsize_t dims[RANK], i; + hsize_t num_elements; + int k; + char dset_name[20]; + H5F_t *f = NULL; + H5C_t *cache_ptr = NULL; + H5AC_cache_config_t config; + hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + herr_t ret; + + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl); + VRFY((file_id >= 0), ""); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + f = (H5F_t *)H5I_object(file_id); + VRFY((f != NULL), ""); + + cache_ptr = f->shared->cache; + VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); + + cache_ptr->ignore_tags = TRUE; + H5C_stats__reset(cache_ptr); + config.version = H5AC__CURR_CACHE_CONFIG_VERSION; + + ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config); + VRFY((ret == 0), ""); + + config.metadata_write_strategy = metadata_write_strategy; + + ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config); + VRFY((ret == 0), ""); + + grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((grp_id >= 0), ""); + + dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); + dims[1] = (hsize_t)(COL_FACTOR * mpi_size); + sid = H5Screate_simple(RANK, dims, NULL); + VRFY((sid >= 0), "H5Screate_simple succeeded"); + + /* Each process takes a slabs of rows. 
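
Configuring a file for the page buffer exercised by test_page_buffer_access() uses only a few public property calls. A sketch follows; the function name and byte counts are illustrative assumptions, not the test's values.

#include <mpi.h>
#include <hdf5.h>

/* Sketch: paged file-space aggregation plus a page buffer. As the test
 * demonstrates, collective metadata writes must be disabled for page
 * buffering to work. */
static hid_t create_paged_file(const char *name, MPI_Comm comm)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid;

    H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
    H5Pset_file_space_page_size(fcpl, 4096);        /* 4 KiB pages (illustrative) */
    H5Pset_page_buffer_size(fapl, 16 * 4096, 0, 0); /* 16-page buffer (illustrative) */
    H5Pset_coll_metadata_write(fapl, 0);            /* required with page buffering */

    fid = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl);
    H5Pclose(fcpl);
    H5Pclose(fapl);
    return fid;
}
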
*/ + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + num_elements = block[0] * block[1]; + /* allocate memory for data buffer */ + data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + /* put some trivial data in the data_array */ + for (i = 0; i < num_elements; i++) + data_array[i] = mpi_rank + 1; + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(1, &num_elements, NULL); + VRFY((mem_dataspace >= 0), ""); + + for (k = 0; k < NUM_DSETS; k++) { + HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k); + dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret == 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + HDmemset(data_array, 0, num_elements * sizeof(DATATYPE)); + dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret == 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret == 0), ""); + + for (i = 0; i < num_elements; i++) + VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); + + HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k); + ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT); + VRFY((ret == 0), ""); + } + + ret = H5Gclose(grp_id); + VRFY((ret == 0), ""); + ret = H5Fclose(file_id); + VRFY((ret == 0), ""); + ret = H5Sclose(sid); + VRFY((ret == 0), ""); + ret = H5Sclose(mem_dataspace); + VRFY((ret == 0), ""); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + MPI_Barrier(MPI_COMM_WORLD); + HDfree(data_array); + return 0; +} /* create_file */ + +static int +open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size, + size_t page_buffer_size) +{ + hid_t file_id, dset_id, grp_id, grp_id2; + hid_t sid, mem_dataspace; + DATATYPE *data_array = NULL; + hsize_t dims[RANK]; + hsize_t start[RANK]; + hsize_t count[RANK]; + hsize_t stride[RANK]; + hsize_t block[RANK]; + int i, 
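
The decomposition used by create_file() and open_file() gives each rank one contiguous band of rows. A sketch of that selection as a standalone helper (the helper name is hypothetical; assumes hdf5.h is included):

/* Sketch: select this rank's band of rows in a 2-D dataspace. With
 * count == 1 a NULL stride is equivalent to the test's stride == block. */
static herr_t select_row_band(hid_t sid, int mpi_rank, int mpi_size)
{
    hsize_t dims[2];
    hsize_t start[2], block[2];
    hsize_t count[2] = {1, 1};

    if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
        return -1;
    block[0] = dims[0] / (hsize_t)mpi_size; /* rows per rank */
    block[1] = dims[1];                     /* all columns */
    start[0] = (hsize_t)mpi_rank * block[0];
    start[1] = 0;
    return H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, block);
}
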
k, ndims; + hsize_t num_elements; + char dset_name[20]; + H5F_t *f = NULL; + H5C_t *cache_ptr = NULL; + H5AC_cache_config_t config; + hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ + herr_t ret; + + config.version = H5AC__CURR_CACHE_CONFIG_VERSION; + ret = H5Pget_mdc_config(fapl, &config); + VRFY((ret == 0), ""); + + config.metadata_write_strategy = metadata_write_strategy; + + ret = H5Pget_mdc_config(fapl, &config); + VRFY((ret == 0), ""); + + file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((file_id >= 0), ""); + + /* Push API context */ + ret = H5CX_push(); + VRFY((ret == 0), "H5CX_push()"); + api_ctx_pushed = TRUE; + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + f = (H5F_t *)H5I_object(file_id); + VRFY((f != NULL), ""); + + cache_ptr = f->shared->cache; + VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), ""); + + MPI_Barrier(MPI_COMM_WORLD); + + VRFY((f->shared->page_buf != NULL), ""); + VRFY((f->shared->page_buf->page_size == page_size), ""); + VRFY((f->shared->page_buf->max_size == page_buffer_size), ""); + + grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT); + VRFY((grp_id >= 0), ""); + + dims[0] = (hsize_t)(ROW_FACTOR * mpi_size); + dims[1] = (hsize_t)(COL_FACTOR * mpi_size); + + /* Each process takes a slabs of rows. */ + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; + stride[0] = block[0]; + stride[1] = block[1]; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + num_elements = block[0] * block[1]; + /* allocate memory for data buffer */ + data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE)); + VRFY((data_array != NULL), "data_array HDmalloc succeeded"); + + /* create a memory dataspace independently */ + mem_dataspace = H5Screate_simple(1, &num_elements, NULL); + VRFY((mem_dataspace >= 0), ""); + + for (k = 0; k < NUM_DSETS; k++) { + HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k); + dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT); + VRFY((dset_id >= 0), ""); + + sid = H5Dget_space(dset_id); + VRFY((dset_id >= 0), "H5Dget_space succeeded"); + + ndims = H5Sget_simple_extent_dims(sid, dims, NULL); + VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded"); + VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions"); + VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions"); + + ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); + + ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); + VRFY((ret >= 0), ""); + + ret = H5Dclose(dset_id); + VRFY((ret >= 0), ""); + ret = H5Sclose(sid); + VRFY((ret == 0), ""); + + for (i = 0; i < (int)num_elements; i++) + VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed"); + } + + grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((grp_id2 >= 0), ""); + ret = H5Gclose(grp_id2); + VRFY((ret == 0), ""); + + ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + VRFY((ret == 0), ""); + + MPI_Barrier(MPI_COMM_WORLD); + /* flush invalidate each ring, starting from the outermost ring and + * working inward. 
+ */ + for (i = 0; i < H5C__HASH_TABLE_LEN; i++) { + H5C_cache_entry_t *entry_ptr = NULL; + + entry_ptr = cache_ptr->index[i]; + + while (entry_ptr != NULL) { + HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC); + HDassert(entry_ptr->is_dirty == FALSE); + + if (!entry_ptr->is_pinned && !entry_ptr->is_protected) { + ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0); + VRFY((ret == 0), ""); + } + + entry_ptr = entry_ptr->ht_next; + } + } + MPI_Barrier(MPI_COMM_WORLD); + + grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((grp_id2 >= 0), ""); + ret = H5Gclose(grp_id2); + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret == 0), ""); + + ret = H5Gclose(grp_id); + VRFY((ret == 0), ""); + ret = H5Fclose(file_id); + VRFY((ret == 0), ""); + ret = H5Sclose(mem_dataspace); + VRFY((ret == 0), ""); + + /* Pop API context */ + if (api_ctx_pushed) { + ret = H5CX_pop(FALSE); + VRFY((ret == 0), "H5CX_pop()"); + api_ctx_pushed = FALSE; + } + + HDfree(data_array); + + return nerrors; } +/* + * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the + * incoming fapl that could conflict with the existing values in H5F_shared_t on + * multiple opens of the same file. + */ +void +test_file_properties(void) +{ + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */ + hbool_t is_coll; + htri_t are_equal; + const char *filename; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + MPI_Comm comm_out = MPI_COMM_NULL; + MPI_Info info_out = MPI_INFO_NULL; + herr_t ret; /* Generic return value */ + int mpi_ret; /* MPI return value */ + int cmp; /* Compare value */ + + filename = (const char *)GetTestParameters(); + + /* set up MPI parameters */ + mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded"); + mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded"); + mpi_ret = MPI_Info_create(&info); + VRFY((mpi_ret >= 0), "MPI_Info_create succeeded"); + mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio"); + + /* Check getting and setting MPI properties + * (for use in VOL connectors, not the MPI-I/O VFD) + */ + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out); + VRFY((ret >= 0), "H5Pget_mpi_params succeeded"); + + /* Check the communicator */ + VRFY((comm != comm_out), "Communicators should not be bitwise identical"); + cmp = MPI_UNEQUAL; + mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp); + VRFY((ret >= 0), "MPI_Comm_compare succeeded"); + VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare"); + + /* Check the info object */ + VRFY((info != info_out), "Info objects should not be bitwise identical"); + + /* Free the obtained comm and info object */ + mpi_ret = MPI_Comm_free(&comm_out); + VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded"); + mpi_ret = MPI_Info_free(&info_out); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + + /* Copy the fapl and ensure it's equal to the original */ + fapl_copy_id = 
H5Pcopy(fapl_id); + VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((TRUE == are_equal), "H5Pequal"); + + /* Add a property to the copy and ensure it's different now */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_copy_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((FALSE == are_equal), "H5Pequal"); + + /* Add a property with the same key but a different value to the original + * and ensure they are still different. + */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((FALSE == are_equal), "H5Pequal"); + + /* Set the second property in the original to the same + * value as the copy and ensure they are the same now. + */ + mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc"); + VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set"); + ret = H5Pset_mpi_params(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_mpi_params succeeded"); + are_equal = H5Pequal(fapl_id, fapl_copy_id); + VRFY((TRUE == are_equal), "H5Pequal"); + + /* create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Open the file with the MPI-IO driver */ + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Open the file with the MPI-IO driver w/ collective settings */ + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((ret >= 0), "H5Pset_fapl_mpio failed"); + /* Collective metadata writes */ + ret = H5Pset_coll_metadata_write(fapl_id, TRUE); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + /* Collective metadata read API calling requirement */ + ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + 
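
The property checks in test_file_properties() combine into a simple recipe for a parallel fapl. A sketch (the helper name is hypothetical):

#include <mpi.h>
#include <hdf5.h>

/* Sketch: a fapl with the settings the test verifies — the MPI-IO VFD,
 * MPI params for VOL connectors, and collective metadata I/O enabled. */
static hid_t make_parallel_fapl(MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl, comm, info);    /* MPI-IO virtual file driver */
    H5Pset_mpi_params(fapl, comm, info);   /* for VOL connectors, not the VFD */
    H5Pset_coll_metadata_write(fapl, 1);   /* collective metadata writes */
    H5Pset_all_coll_metadata_ops(fapl, 1); /* collective metadata reads */
    return fapl;
}

Note that H5Pget_mpi_params() hands back duplicated handles, which is why the test compares with MPI_Comm_compare() (expecting MPI_CONGRUENT rather than bitwise identity) and frees the returned comm and info.
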
VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement"); + + /* close fapl and retrieve it from file */ + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + fapl_id = H5I_INVALID_HID; + + fapl_id = H5Fget_access_plist(fid); + VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS"); + + /* verify settings for file access properties */ + + /* Collective metadata writes */ + ret = H5Pget_coll_metadata_write(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes"); + + /* Collective metadata read API calling requirement */ + ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll); + VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded"); + VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement"); + + /* close file */ + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Release file-access plist */ + ret = H5Pclose(fapl_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fapl_copy_id); + VRFY((ret >= 0), "H5Pclose succeeded"); + + /* Free the MPI info object */ + mpi_ret = MPI_Info_free(&info); + VRFY((mpi_ret >= 0), "MPI_Info_free succeeded"); + +} /* end test_file_properties() */ + +void +test_delete(void) +{ + hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */ + hid_t fapl_id = H5I_INVALID_HID; /* File access plist */ + const char *filename = NULL; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */ + herr_t ret; /* Generic return value */ + + filename = (const char *)GetTestParameters(); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* setup file access plist */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate"); + ret = H5Pset_fapl_mpio(fapl_id, comm, info); + VRFY((SUCCEED == ret), "H5Pset_fapl_mpio"); + + /* create the file */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((fid != H5I_INVALID_HID), "H5Fcreate"); + + /* close the file */ + ret = H5Fclose(fid); + VRFY((SUCCEED == ret), "H5Fclose"); + + /* Verify that the file is an HDF5 file */ + is_hdf5 = H5Fis_accessible(filename, fapl_id); + VRFY((TRUE == is_hdf5), "H5Fis_accessible"); + + /* Delete the file */ + ret = H5Fdelete(filename, fapl_id); + VRFY((SUCCEED == ret), "H5Fdelete"); + + /* Verify that the file is NO LONGER an HDF5 file */ + /* This should fail since there is no file */ + H5E_BEGIN_TRY + { + is_hdf5 = H5Fis_accessible(filename, fapl_id); + } + H5E_END_TRY; + VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible"); + + /* Release file-access plist */ + ret = H5Pclose(fapl_id); + VRFY((SUCCEED == ret), "H5Pclose"); +} /* end test_delete() */ diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c index 544ba32..676978c 100644 --- a/testpar/t_file_image.c +++ 
b/testpar/t_file_image.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -23,11 +20,11 @@ * * Process zero: * - * 1) Creates a core file with an integer vector data set of - * length n (= mpi_size), + * 1) Creates a core file with an integer vector data set of + * length n (= mpi_size), * - * 2) Initializes the vector to zero in * location 0, and to -1 - * everywhere else. + * 2) Initializes the vector to zero in * location 0, and to -1 + * everywhere else. * * 3) Flushes the core file, and gets an image of it. Closes * the core file. @@ -37,7 +34,7 @@ * 5) Awaits receipt on a file image from process n-1. * * 6) opens the image received from process n-1, verifies that - * it contains a vector of length equal to mpi_size, and + * it contains a vector of length equal to mpi_size, and * that the vector contains (0, 1, 2, ... n-1) * * 7) closes the core file and exits. @@ -47,7 +44,7 @@ * 1) Await receipt of file image from process (i - 1). * * 2) Open the image with the core file driver, verify that i - * contains a vector v of length, and that v[j] = j for + * contains a vector v of length, and that v[j] = j for * 0 <= j < i, and that v[j] == -1 for i <= j < n * * 3) Set v[i] = i in the core file. 
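
One hop of the daisy chain described above can be sketched as follows. The helper name is hypothetical and error checks are omitted; the receiving rank would pair the matching MPI_Recv() calls with H5Pset_fapl_core() and H5Pset_file_image(), exactly as the test body below does.

#include <stdlib.h>
#include <mpi.h>
#include <hdf5.h>

/* Sketch: flush a core file, extract its in-memory image, and ship it
 * to the next rank in the chain. */
static void send_image_to(hid_t file_id, int dest, MPI_Comm comm)
{
    ssize_t len;
    void   *buf;

    H5Fflush(file_id, H5F_SCOPE_GLOBAL);
    len = H5Fget_file_image(file_id, NULL, 0);    /* query image size */
    buf = malloc((size_t)len);
    H5Fget_file_image(file_id, buf, (size_t)len); /* copy image into buffer */

    MPI_Ssend(&len, (int)sizeof(len), MPI_BYTE, dest, 0, comm);
    MPI_Ssend(buf, (int)len, MPI_BYTE, dest, 0, comm);
    free(buf);
}
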
@@ -63,81 +60,75 @@ void file_image_daisy_chain_test(void) { - char file_name[1024] = "\0"; - int mpi_size, mpi_rank; - int mpi_result; - int i; - int space_ndims; + char file_name[1024] = "\0"; + int mpi_size, mpi_rank; + int mpi_result; + int i; + int space_ndims; MPI_Status rcvstat; - int * vector_ptr = NULL; - hid_t fapl_id = -1; - hid_t file_id; /* file IDs */ - hid_t dset_id = -1; - hid_t dset_type_id = -1; - hid_t space_id = -1; - herr_t err; - hsize_t dims[1]; - void * image_ptr = NULL; - ssize_t bytes_read; - ssize_t image_len; - hbool_t vector_ok = TRUE; - htri_t tri_result; - + int *vector_ptr = NULL; + hid_t fapl_id = -1; + hid_t file_id; /* file IDs */ + hid_t dset_id = -1; + hid_t dset_type_id = -1; + hid_t space_id = -1; + herr_t err; + hsize_t dims[1]; + void *image_ptr = NULL; + ssize_t bytes_read; + ssize_t image_len; + hbool_t vector_ok = TRUE; + htri_t tri_result; /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* setup file name */ - HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", - (int)mpi_rank); + HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank); + + if (mpi_rank == 0) { - if(mpi_rank == 0) { - - /* 1) Creates a core file with an integer vector data set - * of length mpi_size, + /* 1) Creates a core file with an integer vector data set + * of length mpi_size, */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); + fapl_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl_id >= 0), "creating fapl"); - err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE); + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); VRFY((err >= 0), "setting core file driver in fapl."); file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id); VRFY((file_id >= 0), "created core file"); - dims[0] = (hsize_t)mpi_size; - space_id = H5Screate_simple(1, dims, dims); + dims[0] = (hsize_t)mpi_size; + space_id = H5Screate_simple(1, dims, dims); VRFY((space_id >= 0), "created data space"); - dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((dset_id >= 0), "created data set"); - - /* 2) Initialize the vector to zero in location 0, and - * to -1 everywhere else. + /* 2) Initialize the vector to zero in location 0, and + * to -1 everywhere else. */ - vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); VRFY((vector_ptr != NULL), "allocated in memory representation of vector"); vector_ptr[0] = 0; - for(i = 1; i < mpi_size; i++) + for (i = 1; i < mpi_size; i++) vector_ptr[i] = -1; - err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, (void *)vector_ptr); + err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); VRFY((err >= 0), "wrote initial data to vector."); HDfree(vector_ptr); vector_ptr = NULL; - /* 3) Flush the core file, and get an image of it. Close * the core file. 
*/ - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "flushed core file."); image_len = H5Fget_file_image(file_id, NULL, (size_t)0); @@ -150,203 +141,192 @@ file_image_daisy_chain_test(void) VRFY(bytes_read == image_len, "wrote file into image buffer"); err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); + VRFY((err >= 0), "closed data space."); - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); - /* 4) Send the image to process 1. */ - mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), - MPI_BYTE, 1, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1"); + mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1"); - mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, - MPI_BYTE, 1, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1"); + mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1"); HDfree(image_ptr); image_ptr = NULL; image_len = 0; + /* 5) Await receipt on a file image from process n-1. */ - /* 5) Await receipt on a file image from process n-1. */ - - mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), - MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, - &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1"); + mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0, + MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1"); image_ptr = (void *)HDmalloc((size_t)image_len); VRFY(image_ptr != NULL, "allocated file image receive buffer."); - mpi_result = MPI_Recv((void *)image_ptr, (int)image_len, - MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, - &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), \ - "received file image from process n-1"); - - /* 6) open the image received from process n-1, verify that - * it contains a vector of length equal to mpi_size, and - * that the vector contains (0, 1, 2, ... n-1). + mpi_result = + MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1"); + + /* 6) open the image received from process n-1, verify that + * it contains a vector of length equal to mpi_size, and + * that the vector contains (0, 1, 2, ... n-1). 
*/ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); + fapl_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl_id >= 0), "creating fapl"); - err = H5Pset_fapl_core(fapl_id, (size_t)(64 *1024), FALSE); + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); VRFY((err >= 0), "setting core file driver in fapl."); - err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); + err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); VRFY((err >= 0), "set file image in fapl."); file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); VRFY((file_id >= 0), "opened received file image file"); - dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); VRFY((dset_id >= 0), "opened data set"); - dset_type_id = H5Dget_type(dset_id); + dset_type_id = H5Dget_type(dset_id); VRFY((dset_type_id >= 0), "obtained data set type"); - tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); + tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); VRFY((tri_result == TRUE), "verified data set type"); - space_id = H5Dget_space(dset_id); + space_id = H5Dget_space(dset_id); VRFY((space_id >= 0), "opened data space"); - space_ndims = H5Sget_simple_extent_ndims(space_id); - VRFY((space_ndims == 1), "verified data space num dims(1)"); + space_ndims = H5Sget_simple_extent_ndims(space_id); + VRFY((space_ndims == 1), "verified data space num dims(1)"); - space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); - VRFY((space_ndims == 1), "verified data space num dims(2)"); - VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); + space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); + VRFY((space_ndims == 1), "verified data space num dims(2)"); + VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); - vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); - err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, (void *)vector_ptr); + err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); VRFY((err >= 0), "read received vector."); - vector_ok = TRUE; - for(i = 0; i < mpi_size; i++) - if(vector_ptr[i] != i) + vector_ok = TRUE; + for (i = 0; i < mpi_size; i++) + if (vector_ptr[i] != i) vector_ok = FALSE; VRFY((vector_ok), "verified received vector."); - - /* 7) closes the core file and exit. */ + + HDfree(vector_ptr); + vector_ptr = NULL; + + /* 7) closes the core file and exit. */ err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); + VRFY((err >= 0), "closed data space."); - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); HDfree(image_ptr); image_ptr = NULL; image_len = 0; - } else { + } + else { /* 1) Await receipt of file image from process (i - 1). 
*/ - mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), - MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, - &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), \ - "received image size from process mpi_rank-1"); + mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0, + MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1"); image_ptr = (void *)HDmalloc((size_t)image_len); VRFY(image_ptr != NULL, "allocated file image receive buffer."); - mpi_result = MPI_Recv((void *)image_ptr, (int)image_len, - MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, - &rcvstat); - VRFY((mpi_result == MPI_SUCCESS), \ - "received file image from process mpi_rank-1"); - - /* 2) Open the image with the core file driver, verify that it - * contains a vector v of length, and that v[j] = j for - * 0 <= j < i, and that v[j] == -1 for i <= j < n - */ - fapl_id = H5Pcreate(H5P_FILE_ACCESS); + mpi_result = + MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat); + VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1"); + + /* 2) Open the image with the core file driver, verify that it + * contains a vector v of length, and that v[j] = j for + * 0 <= j < i, and that v[j] == -1 for i <= j < n + */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl_id >= 0), "creating fapl"); - err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); + err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE); VRFY((err >= 0), "setting core file driver in fapl."); - err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); + err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len); VRFY((err >= 0), "set file image in fapl."); file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id); - H5Eprint2(H5P_DEFAULT, stderr); + H5Eprint2(H5P_DEFAULT, stderr); VRFY((file_id >= 0), "opened received file image file"); - dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); + dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT); VRFY((dset_id >= 0), "opened data set"); - dset_type_id = H5Dget_type(dset_id); + dset_type_id = H5Dget_type(dset_id); VRFY((dset_type_id >= 0), "obtained data set type"); - tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); + tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT); VRFY((tri_result == TRUE), "verified data set type"); - space_id = H5Dget_space(dset_id); + space_id = H5Dget_space(dset_id); VRFY((space_id >= 0), "opened data space"); - space_ndims = H5Sget_simple_extent_ndims(space_id); - VRFY((space_ndims == 1), "verified data space num dims(1)"); + space_ndims = H5Sget_simple_extent_ndims(space_id); + VRFY((space_ndims == 1), "verified data space num dims(1)"); - space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); - VRFY((space_ndims == 1), "verified data space num dims(2)"); - VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); + space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL); + VRFY((space_ndims == 1), "verified data space num dims(2)"); + VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims"); - vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); + vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int)); VRFY((vector_ptr != NULL), "allocated in memory rep of vector"); - err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, (void *)vector_ptr); + err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); VRFY((err >= 0), "read received 
vector."); - vector_ok = TRUE; - for(i = 0; i < mpi_size; i++){ - if(i < mpi_rank) { - if(vector_ptr[i] != i) + vector_ok = TRUE; + for (i = 0; i < mpi_size; i++) { + if (i < mpi_rank) { + if (vector_ptr[i] != i) vector_ok = FALSE; - } else { - if(vector_ptr[i] != -1) + } + else { + if (vector_ptr[i] != -1) vector_ok = FALSE; - } + } } VRFY((vector_ok), "verified received vector."); - - /* 3) Set v[i] = i in the core file. */ + /* 3) Set v[i] = i in the core file. */ - vector_ptr[mpi_rank] = mpi_rank; + vector_ptr[mpi_rank] = mpi_rank; - err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, (void *)vector_ptr); + err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr); VRFY((err >= 0), "wrote modified data to vector."); HDfree(vector_ptr); vector_ptr = NULL; - - /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */ + /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */ - err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); + err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "flushed core file."); image_len = H5Fget_file_image(file_id, NULL, (size_t)0); @@ -358,38 +338,33 @@ file_image_daisy_chain_test(void) bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len); VRFY(bytes_read == image_len, "wrote file into image buffer"); - mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), - MPI_BYTE, (mpi_rank + 1) % mpi_size, 0, - MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), \ - "sent image size to process (mpi_rank + 1) % mpi_size"); + mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, + (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD); + VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size"); - mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, - MPI_BYTE, (mpi_rank + 1) % mpi_size, 0, + mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS), \ - "sent image to process (mpi_rank + 1) % mpi_size"); + VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size"); HDfree(image_ptr); image_ptr = NULL; image_len = 0; - - /* 5) close the core file and exit. */ + + /* 5) close the core file and exit. */ err = H5Sclose(space_id); - VRFY((err >= 0), "closed data space."); + VRFY((err >= 0), "closed data space."); - err = H5Dclose(dset_id); - VRFY((err >= 0), "closed data set."); + err = H5Dclose(dset_id); + VRFY((err >= 0), "closed data set."); - err = H5Fclose(file_id); - VRFY((err >= 0), "closed core file(1)."); + err = H5Fclose(file_id); + VRFY((err >= 0), "closed core file(1)."); - err = H5Pclose(fapl_id); - VRFY((err >= 0), "closed fapl(1)."); + err = H5Pclose(fapl_id); + VRFY((err >= 0), "closed fapl(1)."); } return; } /* file_image_daisy_chain_test() */ - diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c index 5e1cd04..0781594 100644 --- a/testpar/t_filter_read.c +++ b/testpar/t_filter_read.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. 
COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -21,27 +18,25 @@ * Date: 2007/05/15 */ - #include "testphdf5.h" #ifdef H5_HAVE_SZLIB_H -# include "szlib.h" +#include "szlib.h" #endif static int mpi_size, mpi_rank; /* Chunk sizes */ -#define CHUNK_DIM1 7 -#define CHUNK_DIM2 27 +#define CHUNK_DIM1 7 +#define CHUNK_DIM2 27 /* Sizes of the vertical hyperslabs. Total dataset size is {HS_DIM1, HS_DIM2 * mpi_size } */ -#define HS_DIM1 200 -#define HS_DIM2 100 +#define HS_DIM1 200 +#define HS_DIM2 100 - /*------------------------------------------------------------------------- - * Function: filter_read_internal + * Function: filter_read_internal * * Purpose: Tests parallel reading of a 2D dataset written serially using * filters. During the parallel reading phase, the dataset is @@ -53,80 +48,79 @@ static int mpi_size, mpi_rank; *------------------------------------------------------------------------- */ static void -filter_read_internal(const char *filename, hid_t dcpl, - hsize_t *dset_size) +filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size) { - hid_t file, dataset; /* HDF5 IDs */ - hid_t access_plist; /* Access property list ID */ - hid_t sid, memspace; /* Dataspace IDs */ - hsize_t size[2]; /* Dataspace dimensions */ - hsize_t hs_offset[2]; /* Hyperslab offset */ - hsize_t hs_size[2]; /* Hyperslab size */ - size_t i, j; /* Local index variables */ - char name[32] = "dataset"; - herr_t hrc; /* Error status */ - int *points = NULL; /* Writing buffer for entire dataset */ - int *check = NULL; /* Reading buffer for selected hyperslab */ + hid_t file, dataset; /* HDF5 IDs */ + hid_t access_plist; /* Access property list ID */ + hid_t sid, memspace; /* Dataspace IDs */ + hsize_t size[2]; /* Dataspace dimensions */ + hsize_t hs_offset[2]; /* Hyperslab offset */ + hsize_t hs_size[2]; /* Hyperslab size */ + size_t i, j; /* Local index variables */ + char name[32] = "dataset"; + herr_t hrc; /* Error status */ + int *points = NULL; /* Writing buffer for entire dataset */ + int *check = NULL; /* Reading buffer for selected hyperslab */ /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* set sizes for dataset and hyperslabs */ hs_size[0] = size[0] = HS_DIM1; - hs_size[1] = HS_DIM2; + hs_size[1] = HS_DIM2; - size[1] = hs_size[1] * mpi_size; + size[1] = hs_size[1] * (hsize_t)mpi_size; hs_offset[0] = 0; - hs_offset[1] = hs_size[1] * mpi_rank; + hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank; /* Create the data space */ sid = H5Screate_simple(2, size, NULL); - VRFY(sid>=0, "H5Screate_simple"); + VRFY(sid >= 0, "H5Screate_simple"); /* Create buffers */ points = (int *)HDmalloc(size[0] * size[1] * sizeof(int)); - VRFY(points!=NULL, "HDmalloc"); + VRFY(points 
!= NULL, "HDmalloc"); check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int)); - VRFY(check!=NULL, "HDmalloc"); + VRFY(check != NULL, "HDmalloc"); /* Initialize writing buffer with random data */ - for(i = 0; i < size[0]; i++) - for(j = 0; j < size[1]; j++) - points[i * size[1]+j] = (int)(i+j+7); + for (i = 0; i < size[0]; i++) + for (j = 0; j < size[1]; j++) + points[i * size[1] + j] = (int)(i + j + 7); VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability"); /* Serial write phase */ - if(MAINPROCESS) { + if (MAINPROCESS) { file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - VRFY(file>=0, "H5Fcreate"); + VRFY(file >= 0, "H5Fcreate"); /* Create the dataset */ dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY(dataset>=0, "H5Dcreate2"); + VRFY(dataset >= 0, "H5Dcreate2"); hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points); - VRFY(hrc>=0, "H5Dwrite"); + VRFY(hrc >= 0, "H5Dwrite"); *dset_size = H5Dget_storage_size(dataset); - VRFY(*dset_size>0, "H5Dget_storage_size"); + VRFY(*dset_size > 0, "H5Dget_storage_size"); - hrc = H5Dclose (dataset); - VRFY(hrc>=0, "H5Dclose"); + hrc = H5Dclose(dataset); + VRFY(hrc >= 0, "H5Dclose"); - hrc = H5Fclose (file); - VRFY(hrc>=0, "H5Fclose"); + hrc = H5Fclose(file); + VRFY(hrc >= 0, "H5Fclose"); } MPI_Barrier(MPI_COMM_WORLD); /* Parallel read phase */ /* Set up MPIO file access property lists */ - access_plist = H5Pcreate(H5P_FILE_ACCESS); + access_plist = H5Pcreate(H5P_FILE_ACCESS); VRFY((access_plist >= 0), "H5Pcreate"); hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL); @@ -140,51 +134,48 @@ filter_read_internal(const char *filename, hid_t dcpl, VRFY((dataset >= 0), "H5Dopen2"); hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL); - VRFY(hrc>=0, "H5Sselect_hyperslab"); + VRFY(hrc >= 0, "H5Sselect_hyperslab"); memspace = H5Screate_simple(2, hs_size, NULL); - VRFY(memspace>=0, "H5Screate_simple"); + VRFY(memspace >= 0, "H5Screate_simple"); - hrc = H5Dread (dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check); - VRFY(hrc>=0, "H5Dread"); + hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check); + VRFY(hrc >= 0, "H5Dread"); /* Check that the values read are the same as the values written */ - for (i=0; i<hs_size[0]; i++) { - for (j=0; j<hs_size[1]; j++) { - if(points[i*size[1]+(size_t)hs_offset[1]+j] != - check[i*hs_size[1]+j]) { - fprintf(stderr," Read different values than written.\n"); - fprintf(stderr," At index %lu,%lu\n", - (unsigned long)(i), - (unsigned long)(hs_offset[1]+j)); - fprintf(stderr," At original: %d\n", - (int)points[i*size[1]+(size_t)hs_offset[1]+j]); - fprintf(stderr," At returned: %d\n", - (int)check[i*hs_size[1]+j]); - VRFY(FALSE, ""); - } - } + for (i = 0; i < hs_size[0]; i++) { + for (j = 0; j < hs_size[1]; j++) { + if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) { + HDfprintf(stderr, " Read different values than written.\n"); + HDfprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i), + (unsigned long)(hs_offset[1] + j)); + HDfprintf(stderr, " At original: %d\n", + (int)points[i * size[1] + (size_t)hs_offset[1] + j]); + HDfprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]); + VRFY(FALSE, ""); + } + } } /* Get the storage size of the dataset */ - *dset_size=H5Dget_storage_size(dataset); - VRFY(*dset_size!=0, "H5Dget_storage_size"); + *dset_size = H5Dget_storage_size(dataset); + VRFY(*dset_size 
!= 0, "H5Dget_storage_size"); /* Clean up objects used for this test */ - hrc = H5Dclose (dataset); - VRFY(hrc>=0, "H5Dclose"); + hrc = H5Dclose(dataset); + VRFY(hrc >= 0, "H5Dclose"); - hrc = H5Sclose (sid); - VRFY(hrc>=0, "H5Sclose"); + hrc = H5Sclose(sid); + VRFY(hrc >= 0, "H5Sclose"); - hrc = H5Sclose (memspace); - VRFY(hrc>=0, "H5Sclose"); + hrc = H5Sclose(memspace); + VRFY(hrc >= 0, "H5Sclose"); - hrc = H5Pclose (access_plist); - VRFY(hrc>=0, "H5Pclose"); + hrc = H5Pclose(access_plist); + VRFY(hrc >= 0, "H5Pclose"); - hrc = H5Fclose (file); - VRFY(hrc>=0, "H5Fclose"); + hrc = H5Fclose(file); + VRFY(hrc >= 0, "H5Fclose"); HDfree(points); HDfree(check); @@ -192,14 +183,13 @@ filter_read_internal(const char *filename, hid_t dcpl, MPI_Barrier(MPI_COMM_WORLD); } - /*------------------------------------------------------------------------- * Function: test_filter_read * - * Purpose: Tests parallel reading of datasets written serially using + * Purpose: Tests parallel reading of datasets written serially using * several (combinations of) filters. * - * Programmer: Christian Chilan + * Programmer: Christian Chilan * Tuesday, May 15, 2007 * * Modifications: @@ -210,119 +200,152 @@ filter_read_internal(const char *filename, hid_t dcpl, void test_filter_read(void) { - hid_t dc; /* HDF5 IDs */ - const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ - hsize_t null_size; /* Size of dataset without filters */ - herr_t hrc; - const char *filename; - hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ + hid_t dc; /* HDF5 IDs */ + const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */ + hsize_t null_size; /* Size of dataset without filters */ + unsigned chunk_opts; /* Chunk options */ + unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ + herr_t hrc; + const char *filename; +#ifdef H5_HAVE_FILTER_FLETCHER32 + hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */ +#endif #ifdef H5_HAVE_FILTER_DEFLATE - hsize_t deflate_size; /* Size of dataset with deflate filter */ -#endif /* H5_HAVE_FILTER_DEFLATE */ + hsize_t deflate_size; /* Size of dataset with deflate filter */ +#endif /* H5_HAVE_FILTER_DEFLATE */ #ifdef H5_HAVE_FILTER_SZIP - hsize_t szip_size; /* Size of dataset with szip filter */ - unsigned szip_options_mask=H5_SZIP_NN_OPTION_MASK; - unsigned szip_pixels_per_block=4; + hsize_t szip_size; /* Size of dataset with szip filter */ + unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK; + unsigned szip_pixels_per_block = 4; #endif /* H5_HAVE_FILTER_SZIP */ - hsize_t shuffle_size; /* Size of dataset with shuffle filter */ + hsize_t shuffle_size; /* Size of dataset with shuffle filter */ -#if(defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP) - hsize_t combo_size; /* Size of dataset with multiple filters */ -#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */ +#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP) + hsize_t combo_size; /* Size of dataset with multiple filters */ +#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */ filename = GetTestParameters(); - if(VERBOSE_MED) - printf("Parallel reading of dataset written with filters %s\n", filename); + if (VERBOSE_MED) + HDprintf("Parallel reading of dataset written with filters %s\n", filename); /*---------------------------------------------------------- * STEP 0: Test without filters. 
*---------------------------------------------------------- */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0,"H5Pcreate"); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0,"H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - filter_read_internal(filename,dc,&null_size); + filter_read_internal(filename, dc, &null_size); /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0,"H5Pclose"); + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); - /*---------------------------------------------------------- - * STEP 1: Test Fletcher32 Checksum by itself. - *---------------------------------------------------------- - */ + /* Run steps 1-3 both with and without filters disabled on partial chunks */ + for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1; + disable_partial_chunk_filters++) { + /* Set chunk options appropriately */ + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0,"H5Pset_filter"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_filter"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0,"H5Pset_filter"); + hrc = H5Pget_chunk_opts(dc, &chunk_opts); + VRFY(hrc >= 0, "H5Pget_chunk_opts"); - hrc = H5Pset_filter (dc,H5Z_FILTER_FLETCHER32,0,0,NULL); - VRFY(hrc>=0,"H5Pset_filter"); + if (disable_partial_chunk_filters) + chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; - filter_read_internal(filename,dc,&fletcher32_size); - VRFY(fletcher32_size > null_size,"Size after checksumming is incorrect."); + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + /*---------------------------------------------------------- + * STEP 1: Test Fletcher32 Checksum by itself. + *---------------------------------------------------------- + */ +#ifdef H5_HAVE_FILTER_FLETCHER32 + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pset_filter"); - /*---------------------------------------------------------- - * STEP 2: Test deflation by itself. - *---------------------------------------------------------- - */ + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_filter"); + + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); + + hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL); + VRFY(hrc >= 0, "H5Pset_filter"); + + filter_read_internal(filename, dc, &fletcher32_size); + VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect."); + + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + +#endif /* H5_HAVE_FILTER_FLETCHER32 */ + + /*---------------------------------------------------------- + * STEP 2: Test deflation by itself. 
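The loop added above re-runs STEPs 1 through 3 both with and without filters applied to partial edge chunks. As a minimal standalone sketch of that DCPL setup (the helper name make_filter_test_dcpl and the chunk dimensions are illustrative, not part of the patch):

    /* Sketch: build a chunked DCPL and optionally disable filters on
     * partial edge chunks, mirroring the new loop in test_filter_read(). */
    static hid_t
    make_filter_test_dcpl(hbool_t skip_partial_chunk_filters)
    {
        const hsize_t chunk_size[2] = {7, 27}; /* CHUNK_DIM1 x CHUNK_DIM2 */
        unsigned      opts          = 0;
        hid_t         dcpl          = H5Pcreate(H5P_DATASET_CREATE);

        if (dcpl < 0)
            return H5I_INVALID_HID;
        if (H5Pset_chunk(dcpl, 2, chunk_size) < 0 || H5Pget_chunk_opts(dcpl, &opts) < 0)
            goto error;
        if (skip_partial_chunk_filters)
            opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
        if (H5Pset_chunk_opts(dcpl, opts) < 0)
            goto error;
        return dcpl; /* caller adds filters, then closes with H5Pclose() */
    error:
        H5Pclose(dcpl);
        return H5I_INVALID_HID;
    }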
+ *---------------------------------------------------------- + */ #ifdef H5_HAVE_FILTER_DEFLATE - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_deflate (dc, 6); - VRFY(hrc>=0, "H5Pset_deflate"); + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); - filter_read_internal(filename,dc,&deflate_size); + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + filter_read_internal(filename, dc, &deflate_size); -#endif /* H5_HAVE_FILTER_DEFLATE */ + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); +#endif /* H5_HAVE_FILTER_DEFLATE */ - /*---------------------------------------------------------- - * STEP 3: Test szip compression by itself. - *---------------------------------------------------------- - */ + /*---------------------------------------------------------- + * STEP 3: Test szip compression by itself. + *---------------------------------------------------------- + */ #ifdef H5_HAVE_FILTER_SZIP - if(h5_szip_can_encode() == 1) { - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + if (h5_szip_can_encode() == 1) { + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc>=0, "H5Pset_szip"); + hrc = H5Pset_chunk_opts(dc, chunk_opts); + VRFY(hrc >= 0, "H5Pset_chunk_opts"); - filter_read_internal(filename,dc,&szip_size); + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); - } -#endif /* H5_HAVE_FILTER_SZIP */ + filter_read_internal(filename, dc, &szip_size); + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); + } +#endif /* H5_HAVE_FILTER_SZIP */ + } /* end for */ /*---------------------------------------------------------- * STEP 4: Test shuffling by itself. @@ -330,21 +353,20 @@ test_filter_read(void) */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_shuffle (dc); - VRFY(hrc>=0, "H5Pset_shuffle"); + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); - filter_read_internal(filename,dc,&shuffle_size); - VRFY(shuffle_size==null_size,"Shuffled size not the same as uncompressed size."); + filter_read_internal(filename, dc, &shuffle_size); + VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size."); /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); - + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); /*---------------------------------------------------------- * STEP 5: Test shuffle + deflate + checksum in any order. 
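STEP 5 below registers shuffle, deflate, and Fletcher32 in two different orders; HDF5 applies filters in the order they are added to the dataset creation property list (and in reverse order on read). A hedged sketch of the "checksum first" variant, with error checks omitted for brevity:

    /* Adding Fletcher32 first checksums the raw, unshuffled,
     * uncompressed chunk bytes on write. */
    hid_t         dc            = H5Pcreate(H5P_DATASET_CREATE);
    const hsize_t chunk_size[2] = {7, 27};

    H5Pset_chunk(dc, 2, chunk_size);
    H5Pset_fletcher32(dc); /* checksum first     */
    H5Pset_shuffle(dc);    /* then byte shuffle  */
    H5Pset_deflate(dc, 6); /* then gzip, level 6 */
    /* ... create the dataset with dc, then H5Pclose(dc) ... */

Moving the H5Pset_fletcher32() call to last gives the "checksum last" variant exercised immediately afterward.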
@@ -353,47 +375,47 @@ test_filter_read(void) #ifdef H5_HAVE_FILTER_DEFLATE /* Testing shuffle+deflate+checksum filters (checksum first) */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_fletcher32 (dc); - VRFY(hrc>=0, "H5Pset_fletcher32"); + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); - hrc = H5Pset_shuffle (dc); - VRFY(hrc>=0, "H5Pset_shuffle"); + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); - hrc = H5Pset_deflate (dc, 6); - VRFY(hrc>=0, "H5Pset_deflate"); + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename, dc, &combo_size); /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); /* Testing shuffle+deflate+checksum filters (checksum last) */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_shuffle (dc); - VRFY(hrc>=0, "H5Pset_shuffle"); + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); - hrc = H5Pset_deflate (dc, 6); - VRFY(hrc>=0, "H5Pset_deflate"); + hrc = H5Pset_deflate(dc, 6); + VRFY(hrc >= 0, "H5Pset_deflate"); - hrc = H5Pset_fletcher32 (dc); - VRFY(hrc>=0, "H5Pset_fletcher32"); + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename, dc, &combo_size); /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); #endif /* H5_HAVE_FILTER_DEFLATE */ @@ -405,54 +427,53 @@ test_filter_read(void) /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */ dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_fletcher32 (dc); - VRFY(hrc>=0, "H5Pset_fletcher32"); + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); - hrc = H5Pset_shuffle (dc); - VRFY(hrc>=0, "H5Pset_shuffle"); + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); /* Make sure encoding is enabled */ - if(h5_szip_can_encode() == 1) { - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc>=0, "H5Pset_szip"); + if (h5_szip_can_encode() == 1) { + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename, dc, &combo_size); } /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */ /* Make sure encoding is enabled */ - if(h5_szip_can_encode() == 1) { - dc = H5Pcreate(H5P_DATASET_CREATE); - VRFY(dc>=0, "H5Pcreate"); + if (h5_szip_can_encode() == 1) { + dc = H5Pcreate(H5P_DATASET_CREATE); + VRFY(dc >= 0, "H5Pcreate"); - hrc = H5Pset_chunk (dc, 2, chunk_size); - 
VRFY(hrc>=0, "H5Pset_chunk"); + hrc = H5Pset_chunk(dc, 2, chunk_size); + VRFY(hrc >= 0, "H5Pset_chunk"); - hrc = H5Pset_shuffle (dc); - VRFY(hrc>=0, "H5Pset_shuffle"); + hrc = H5Pset_shuffle(dc); + VRFY(hrc >= 0, "H5Pset_shuffle"); - hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); - VRFY(hrc>=0, "H5Pset_szip"); + hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block); + VRFY(hrc >= 0, "H5Pset_szip"); - hrc = H5Pset_fletcher32 (dc); - VRFY(hrc>=0, "H5Pset_fletcher32"); + hrc = H5Pset_fletcher32(dc); + VRFY(hrc >= 0, "H5Pset_fletcher32"); - filter_read_internal(filename,dc,&combo_size); + filter_read_internal(filename, dc, &combo_size); - /* Clean up objects used for this test */ - hrc = H5Pclose (dc); - VRFY(hrc>=0, "H5Pclose"); + /* Clean up objects used for this test */ + hrc = H5Pclose(dc); + VRFY(hrc >= 0, "H5Pclose"); } #endif /* H5_HAVE_FILTER_SZIP */ } - diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c new file mode 100644 index 0000000..b4a4edb --- /dev/null +++ b/testpar/t_filters_parallel.c @@ -0,0 +1,8892 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Jordan Henderson + * 01/31/2017 + * + * This file contains tests for writing to and reading from + * datasets in parallel with filters applied to the data. 
+ */ + +#include "t_filters_parallel.h" + +const char *FILENAME[] = {"t_filters_parallel", NULL}; +char filenames[1][256]; + +static MPI_Comm comm = MPI_COMM_WORLD; +static MPI_Info info = MPI_INFO_NULL; +static int mpi_rank; +static int mpi_size; + +int nerrors = 0; + +/* Arrays of filter ID values and filter names (should match each other) */ +H5Z_filter_t filterIDs[] = { + H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE, H5Z_FILTER_FLETCHER32, + H5Z_FILTER_SZIP, H5Z_FILTER_NBIT, H5Z_FILTER_SCALEOFFSET, +}; + +const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"}; + +/* Function pointer typedef for test functions */ +typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id); + +/* Typedef for filter arguments for user-defined filters */ +typedef struct filter_options_t { + unsigned int flags; + size_t cd_nelmts; + const unsigned int cd_values[]; +} filter_options_t; + +/* + * Enum for verify_space_alloc_status which specifies + * how many chunks have been written to in a dataset + */ +typedef enum num_chunks_written_t { + DATASET_JUST_CREATED, + NO_CHUNKS_WRITTEN, + SOME_CHUNKS_WRITTEN, + ALL_CHUNKS_WRITTEN +} num_chunks_written_t; + +static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options); +static herr_t verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written); + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES +/* Tests for writing data in parallel */ +static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, + H5Z_filter_t 
filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +#endif + +/* Tests for reading data in parallel */ +static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id); +static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void 
test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, + H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); + +/* + * Tests for attempting to round-trip the data going from + * + * written serially -> read in parallel + * + * and + * + * written in parallel -> read serially + */ +static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES +static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); + +/* Other miscellaneous tests */ +static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id); +static void test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id); +#endif + +static test_func tests[] = { +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES + test_write_one_chunk_filtered_dataset, + test_write_filtered_dataset_no_overlap, + test_write_filtered_dataset_no_overlap_partial, + test_write_filtered_dataset_overlap, + test_write_filtered_dataset_single_unlim_dim_no_overlap, + test_write_filtered_dataset_single_unlim_dim_overlap, + test_write_filtered_dataset_multi_unlim_dim_no_overlap, + test_write_filtered_dataset_multi_unlim_dim_overlap, + test_write_filtered_dataset_single_no_selection, + test_write_filtered_dataset_all_no_selection, + test_write_filtered_dataset_point_selection, + test_write_filtered_dataset_interleaved_write, + test_write_transformed_filtered_dataset_no_overlap, + test_write_3d_filtered_dataset_no_overlap_separate_pages, + test_write_3d_filtered_dataset_no_overlap_same_pages, + test_write_3d_filtered_dataset_overlap, + test_write_cmpd_filtered_dataset_no_conversion_unshared, + test_write_cmpd_filtered_dataset_no_conversion_shared, + test_write_cmpd_filtered_dataset_type_conversion_unshared, + test_write_cmpd_filtered_dataset_type_conversion_shared, +#endif + test_read_one_chunk_filtered_dataset, + test_read_filtered_dataset_no_overlap, + test_read_filtered_dataset_overlap, + test_read_filtered_dataset_single_no_selection, + test_read_filtered_dataset_all_no_selection, + test_read_filtered_dataset_point_selection, + test_read_filtered_dataset_interleaved_read, + test_read_transformed_filtered_dataset_no_overlap, + test_read_3d_filtered_dataset_no_overlap_separate_pages, + test_read_3d_filtered_dataset_no_overlap_same_pages, + test_read_3d_filtered_dataset_overlap, + test_read_cmpd_filtered_dataset_no_conversion_unshared, + test_read_cmpd_filtered_dataset_no_conversion_shared, + test_read_cmpd_filtered_dataset_type_conversion_unshared, + test_read_cmpd_filtered_dataset_type_conversion_shared, + 
test_write_serial_read_parallel, +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES + test_write_parallel_read_serial, + test_shrinking_growing_chunks, + test_edge_chunks_no_overlap, + test_edge_chunks_overlap, + test_edge_chunks_partial_write, + test_fill_values, + test_fill_value_undefined, + test_fill_time_never, +#endif +}; + +/* + * Function to call the appropriate HDF5 filter-setting function + * depending on the given filter ID. Used to re-run the tests + * with different filters to check that the data still comes back + * correctly under a variety of circumstances, such as the + * Fletcher32 checksum filter increasing the size of the chunk. + */ +static herr_t +set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options) +{ + switch (filter_id) { + case H5Z_FILTER_DEFLATE: + return H5Pset_deflate(dcpl_id, DEFAULT_DEFLATE_LEVEL); + case H5Z_FILTER_SHUFFLE: + return H5Pset_shuffle(dcpl_id); + case H5Z_FILTER_FLETCHER32: + return H5Pset_fletcher32(dcpl_id); + case H5Z_FILTER_SZIP: { + unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK; + hsize_t chunk_dims[H5S_MAX_RANK] = {0}; + size_t i, chunk_nelemts; + + VRFY(H5Pget_chunk(dcpl_id, H5S_MAX_RANK, chunk_dims) >= 0, "H5Pget_chunk succeeded"); + + for (i = 0, chunk_nelemts = 1; i < H5S_MAX_RANK; i++) + if (chunk_dims[i] > 0) + chunk_nelemts *= chunk_dims[i]; + + if (chunk_nelemts < H5_SZIP_MAX_PIXELS_PER_BLOCK) { + /* + * Can't set SZIP for chunk of 1 data element. + * Pixels-per-block value must be both even + * and non-zero. + */ + if (chunk_nelemts == 1) + return SUCCEED; + + if ((chunk_nelemts % 2) == 0) + pixels_per_block = (unsigned)chunk_nelemts; + else + pixels_per_block = (unsigned)(chunk_nelemts - 1); + } + else + pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK; + + return H5Pset_szip(dcpl_id, 0, pixels_per_block); + } + case H5Z_FILTER_NBIT: + return H5Pset_nbit(dcpl_id); + case H5Z_FILTER_SCALEOFFSET: + return H5Pset_scaleoffset(dcpl_id, H5Z_SO_INT, 0); + default: { + if (!filter_options) + return FAIL; + + return H5Pset_filter(dcpl_id, filter_id, filter_options->flags, filter_options->cd_nelmts, + filter_options->cd_values); + } + } +} + +/* + * Function to verify the status of dataset storage space allocation + * based on the dataset's allocation time setting and how many chunks + * in the dataset have been written to. + */ +static herr_t +verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written) +{ + int nfilters; + herr_t ret_value = SUCCEED; + + VRFY(((nfilters = H5Pget_nfilters(dcpl_id)) >= 0), "H5Pget_nfilters succeeded"); + + /* + * Only verify space allocation status when there are filters + * in the dataset's filter pipeline. When filters aren't in the + * pipeline, the space allocation time and status can vary based + * on whether the file was created in parallel or serial mode. + */ + if (nfilters > 0) { + H5D_space_status_t space_status; + H5D_alloc_time_t alloc_time; + + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + VRFY((H5Dget_space_status(dset_id, &space_status) >= 0), "H5Dget_space_status succeeded"); + + switch (alloc_time) { + case H5D_ALLOC_TIME_EARLY: + /* + * Early space allocation should always result in the + * full dataset storage space being allocated. 
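In the SZIP case of set_dcpl_filter() above, the pixels-per-block value must be nonzero, even, and no larger than the number of elements in a chunk. That clamping logic, restated as a hypothetical standalone helper (pick_szip_pixels_per_block is not part of the patch):

    /* Sketch: choose a legal SZIP pixels-per-block for a chunk of
     * chunk_nelmts elements; 0 means "skip SZIP" (one-element chunk). */
    static unsigned
    pick_szip_pixels_per_block(size_t chunk_nelmts)
    {
        if (chunk_nelmts >= H5_SZIP_MAX_PIXELS_PER_BLOCK)
            return H5_SZIP_MAX_PIXELS_PER_BLOCK;
        if (chunk_nelmts < 2)
            return 0;
        return (unsigned)(chunk_nelmts - (chunk_nelmts % 2)); /* force even */
    }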
+ */ + VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status"); + break; + case H5D_ALLOC_TIME_LATE: + /* + * Late space allocation should always result in the + * full dataset storage space being allocated when + * the dataset gets written to. However, if the dataset + * is extended the dataset's space allocation status + * can become partly allocated until the dataset is + * written to again. + */ + if (chunks_written == SOME_CHUNKS_WRITTEN || chunks_written == ALL_CHUNKS_WRITTEN) + VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED) || + (space_status == H5D_SPACE_STATUS_PART_ALLOCATED), + "verified space allocation status"); + else if (chunks_written == NO_CHUNKS_WRITTEN) + /* + * A special case where we wrote to a dataset that + * uses late space allocation, but the write was + * either a no-op (no selection in the dataset + * from any rank) or something caused the write to + * fail late in the process of performing the actual + * write. In either case, space should still have + * been allocated. + */ + VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status"); + else + VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status"); + break; + case H5D_ALLOC_TIME_DEFAULT: + case H5D_ALLOC_TIME_INCR: + /* + * Incremental space allocation should result in + * the dataset's storage space being incrementally + * allocated as chunks are written to. Once all chunks + * have been written to, the space allocation should be + * seen as fully allocated. + */ + if (chunks_written == SOME_CHUNKS_WRITTEN) + VRFY((space_status == H5D_SPACE_STATUS_PART_ALLOCATED), + "verified space allocation status"); + else if (chunks_written == ALL_CHUNKS_WRITTEN) + VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "verified space allocation status"); + else + VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status"); + break; + default: + if (MAINPROCESS) + MESG("unknown space allocation time"); + MPI_Abort(MPI_COMM_WORLD, 1); + } + } + + return ret_value; +} + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES +/* + * Tests parallel write of filtered data in the special + * case where a dataset is composed of a single chunk. 
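The allocation-time cases in verify_space_alloc_status() above boil down to pairing what the DCPL promised against what the dataset reports. A minimal sketch of that check in isolation (dset and dcpl are assumed to be open, valid identifiers):

    H5D_alloc_time_t   alloc_time;
    H5D_space_status_t space_status;

    H5Pget_alloc_time(dcpl, &alloc_time);     /* what the DCPL promised   */
    H5Dget_space_status(dset, &space_status); /* what the dataset reports */

    if (alloc_time == H5D_ALLOC_TIME_EARLY && space_status != H5D_SPACE_STATUS_ALLOCATED)
        HDfprintf(stderr, "early allocation should be fully allocated\n");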
+ * + * Programmer: Jordan Henderson + * 02/01/2017 + */ +static void +test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t start[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to one-chunk filtered dataset"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS; + + filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + block[0] = sel_dims[0]; + block[1] = sel_dims[1]; + start[0] = ((hsize_t)mpi_rank * sel_dims[0]); + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = 
H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS * + (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = ((C_DATATYPE)i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where only + * one process is writing to a particular chunk in the operation. + * In this case, the write operation can be optimized because + * chunks do not have to be redistributed to new owners. 
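In the one-chunk test above, each rank selects a disjoint horizontal band of the dataset's single chunk. The per-rank hyperslab arithmetic reduces to the following sketch (nrows and ncols stand in for the test's dataset macros; filespace comes from H5Dget_space() on the chunked dataset):

    /* Each of mpi_size ranks writes nrows/mpi_size contiguous rows. */
    hsize_t count[2] = {1, 1};
    hsize_t block[2] = {nrows / (hsize_t)mpi_size, ncols};
    hsize_t start[2] = {(hsize_t)mpi_rank * block[0], 0};

    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, block);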
+ * + * Programmer: Jordan Henderson + * 02/01/2017 + */ +static void +test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = + (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", 
%" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where only + * one process is writing to a particular chunk in the operation + * and that process only writes to part of a chunk. 
+ */ +static void +test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing partial write to unshared filtered chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS; + sel_dims[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS / + WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS); + + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS / + WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS); + stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS; + block[1] = (hsize_t)1; + start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * count[0]); + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" 
PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < (size_t)mpi_size; i++) { + size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * + WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS)); + size_t data_idx = i; + + for (size_t j = 0; j < rank_n_elems; j++) { + if ((j % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) { + correct_buf[(i * rank_n_elems) + j] = (C_DATATYPE)data_idx; + data_idx++; + } + } + } + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * more than one process is writing to a particular chunk + * in the operation. In this case, the chunks have to be + * redistributed before the operation so that only one process + * writes to a particular chunk. 
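+ *
+ * A minimal sketch of the shared selection (illustration only; macro
+ * names shortened): every rank takes a horizontal slice of every
+ * chunk, so each chunk receives data from all ranks and chunk
+ * ownership must be negotiated before the chunks are refiltered.
+ *
+ *   block[0]  = CH_NROWS / (hsize_t)mpi_size;  block[1]  = CH_NCOLS;
+ *   count[0]  = NROWS / CH_NROWS;              count[1]  = NCOLS / CH_NCOLS;
+ *   stride[0] = CH_NROWS;                      stride[1] = CH_NCOLS;
+ *   start[0]  = (hsize_t)mpi_rank * block[0];  start[1]  = 0;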
+ * + * Programmer: Jordan Henderson + * 02/01/2017 + */ +static void +test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + count[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], 
start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = + (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * a dataset has a single unlimited dimension and each + * MPI rank writes to its own separate chunk. On each + * iteration, the dataset is extended in its extensible + * dimension by "MPI size" chunks per rank and the new + * chunks are written to, read back and verified. 
+ */ +static void +test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t max_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks w/ single unlimited dimension"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS; + max_dims[0] = dataset_dims[0]; + max_dims[1] = H5S_UNLIMITED; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = + (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS; + stride[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS; + stride[1] = 
(hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * block[0] * count[0]); + start[1] = i * count[1] * block[1]; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], + block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* Verify the correct data was written */ + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + /* Extend the dataset by count[1] chunks in the extensible dimension */ + dataset_dims[1] += count[1] * block[1]; + VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + } + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + } + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * a dataset has a single unlimited dimension and each + * MPI rank writes to a portion of each chunk in the dataset. + * On each iteration, the dataset is extended in its extensible + * dimension by two chunks and the new chunks are written to + * by all ranks, then read back and verified. 
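+ *
+ * Note (illustration only): the I/O below passes H5S_BLOCK as the
+ * memory dataspace, meaning the buffer is treated as one contiguous
+ * block containing exactly as many elements as the file selection:
+ *
+ *   H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data);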
+ */ +static void +test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t max_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered chunks w/ single unlimited dimension"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS; + max_dims[0] = dataset_dims[0]; + max_dims[1] = H5S_UNLIMITED; + chunk_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + filespace = H5Screate_simple(WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) { + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS; + count[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS; + stride[0] = 
(hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS; + block[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = i * count[1] * block[1]; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], + block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* Verify correct data was written */ + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) { + /* Extend the dataset by count[1] chunks in the extensible dimension */ + dataset_dims[1] += count[1] * block[1]; + VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + } + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + } + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * a dataset has two unlimited dimensions and each + * MPI rank writes to its own separate chunks. On each + * iteration, the dataset is extended in its first + * extensible dimension by the size of one chunk per rank + * and in its second extensible dimension by the size of + * one chunk. Then, all chunks are written to, read back + * and verified. 
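+ *
+ * The extension step at the end of each iteration, as a minimal
+ * sketch (illustration only):
+ *
+ *   dataset_dims[0] += (hsize_t)mpi_size * block[0];   one chunk per rank
+ *   dataset_dims[1] += block[1];                       one more chunk
+ *   H5Dset_extent(dset_id, dataset_dims);
+ *   verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);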
+ */ +static void +test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t max_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks w/ two unlimited dimensions"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS; + max_dims[0] = H5S_UNLIMITED; + max_dims[1] = H5S_UNLIMITED; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + for (i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + C_DATATYPE *tmp_realloc = NULL; + size_t j; + + /* Set selected dimensions */ + sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; + sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size); + VRFY((NULL != tmp_realloc), "HDrealloc succeeded"); + data = tmp_realloc; + + tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size); + VRFY((NULL != tmp_realloc), "HDrealloc succeeded"); + read_buf = tmp_realloc; + + for (j = 0; j < data_size / sizeof(*data); j++) + data[j] = (C_DATATYPE)GEN_DATA(j); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* Each process defines the dataset selection 
in memory and writes + * it to the hyperslab in the file + */ + count[0] = (i + 1); + count[1] = (i + 1); + stride[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * block[0] * count[0]); + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], + block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* Verify the correct data was written */ + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + /* + * Extend the dataset by the size of one chunk per rank + * in the first extensible dimension. Extend the dataset + * by the size of chunk in the second extensible dimension. + */ + dataset_dims[0] += (hsize_t)mpi_size * block[0]; + dataset_dims[1] += block[1]; + VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + } + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + } + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * a dataset has two unlimited dimensions and each MPI + * rank writes to a portion of each chunk in the dataset. + * On each iteration, the dataset is extended in its extensible + * dimensions by the size of a chunk and then all chunks are + * written to by all ranks, then read back and verified. 
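+ *
+ * Here the extension step grows both dimensions by a single chunk, as
+ * a minimal sketch (illustration only; macro names shortened):
+ *
+ *   dataset_dims[0] += CH_NROWS;
+ *   dataset_dims[1] += CH_NCOLS;
+ *   H5Dset_extent(dset_id, dataset_dims);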
+ */ +static void +test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t max_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered chunks w/ two unlimited dimensions"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NCOLS; + max_dims[0] = H5S_UNLIMITED; + max_dims[1] = H5S_UNLIMITED; + chunk_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + filespace = H5Screate_simple(WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + for (i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) { + C_DATATYPE *tmp_realloc = NULL; + size_t j; + + /* Set selected dimensions */ + sel_dims[0] = (i + 1); + sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size); + VRFY((NULL != tmp_realloc), "HDrealloc succeeded"); + data = tmp_realloc; + + tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size); + VRFY((NULL != tmp_realloc), "HDrealloc succeeded"); + read_buf = tmp_realloc; + + for (j = 0; j < data_size / sizeof(*data); j++) + data[j] = (C_DATATYPE)GEN_DATA(j); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ 
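+    /* Note (illustration only): on iteration i the selection spans an
+     * (i + 1) x (i + 1) grid of chunks, and each rank takes the single
+     * one-element-tall row at offset mpi_rank within every chunk, so
+     * all ranks share every chunk.
+     */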
+ count[0] = (i + 1); + count[1] = (i + 1); + stride[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], + block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* Verify correct data was written */ + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) { + /* Extend the dataset by the size of a chunk in each extensible dimension */ + dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS; + dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS; + VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + } + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + } + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * a single process in the write operation has no selection + * in the dataset's dataspace. In this case, the process with + * no selection still has to participate in the collective + * space re-allocation for the filtered chunks and also must + * participate in the re-insertion of the filtered chunks + * into the chunk index. 
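+ *
+ * A minimal sketch of the collective call pattern (illustration only;
+ * macro name shortened): the rank with no selection must still make
+ * the call, just with an empty file selection.
+ *
+ *   if (mpi_rank == NO_SELECT_PROC)
+ *       H5Sselect_none(filespace);
+ *   else
+ *       H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block);
+ *   H5Dwrite(dset_id, type, memspace, filespace, dxpl_id, data);   collective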
+ * + * Programmer: Jordan Henderson + * 02/01/2017 + */ +static void +test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + size_t segment_length; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to filtered chunks with a single process having no selection"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + sel_dims[0] = sel_dims[1] = 0; + + filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / + (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + stride[1] = 
(hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + else + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + if (mpi_rank != WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + } + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + + /* Compute the correct offset into the buffer for the process having no selection and clear it */ + segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size; + HDmemset(correct_buf + + ((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), + 0, segment_length * sizeof(*data)); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status - data should only have been written if MPI size > 1 */ + verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1 ? 
SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN)); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case + * where no process in the write operation has a + * selection in the dataset's dataspace. This test is + * to ensure that there are no assertion failures or + * similar issues due to size 0 allocations and the + * like. In this case, the file and dataset are created + * but the dataset is populated with the default fill + * value. + * + * Programmer: Jordan Henderson + * 02/02/2017 + */ +static void +test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to filtered chunks with all processes having no selection"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = sel_dims[1] = 0; + + filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 
0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status - no ranks should have written any data */ + verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data by using + * point selections instead of hyperslab selections. 
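+ *
+ * A minimal sketch of the point selection (illustration only; macro
+ * names shortened): coordinates are packed as (row, col) pairs in one
+ * flat array, with rank r taking rows r, r + mpi_size, r + 2*mpi_size, ...
+ *
+ *   coords[2 * i]     = (hsize_t)mpi_rank + (hsize_t)mpi_size * (i / NCOLS);
+ *   coords[2 * i + 1] = i % NCOLS;
+ *   H5Sselect_elements(filespace, H5S_SELECT_SET, num_points, coords);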
+ * + * Programmer: Jordan Henderson + * 02/02/2017 + */ +static void +test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t *coords = NULL; + hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, j, data_size, correct_buf_size; + size_t num_points; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to filtered chunks with point selection"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Set up point selection */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + num_points = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * + (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size; + coords = (hsize_t *)HDcalloc(1, 2 * num_points * sizeof(*coords)); + VRFY((NULL != coords), "Coords HDcalloc succeeded"); + + for (i = 0; i < num_points; i++) + for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) + coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = + (j > 0) ? 
(i % (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS) + : ((hsize_t)mpi_rank + + ((hsize_t)mpi_size * (i / (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS))); + + VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0), + "Point selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = + (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (coords) + HDfree(coords); + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * each process writes an equal amount of data to each chunk + * in the dataset. Each chunk is distributed among the + * processes in round-robin fashion by blocks of size 1 until + * the whole chunk is selected, leading to an interleaved + * write pattern. 
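+ *
+ * A minimal sketch of the interleaved selection (illustration only;
+ * macro names shortened): each rank takes the one-row slice at offset
+ * mpi_rank inside every chunk, so the rows of a chunk alternate
+ * between ranks.
+ *
+ *   block[0]  = 1;                   block[1]  = CH_NCOLS;
+ *   stride[0] = CH_NROWS;            stride[1] = CH_NCOLS;
+ *   start[0]  = (hsize_t)mpi_rank;   start[1]  = 0;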
+ * + * Programmer: Jordan Henderson + * 02/02/2017 + */ +static void +test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t start[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing interleaved write to filtered chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS; + chunk_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS; + sel_dims[0] = (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / mpi_size); + sel_dims[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS; + + filespace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = + (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS); + count[1] = + (hsize_t)(INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS); + stride[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", 
%" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add Column Index */ + correct_buf[i] = + (C_DATATYPE)((i % (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + + /* Add the Row Index */ + + ((i % (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / + (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS) + + /* Add the amount that gets added when a rank moves down to its next section + vertically in the dataset */ + + ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * + (i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of transformed and filtered data + * in the case where only one process is writing to a + * particular chunk in the operation. Normally, a data + * transform function will cause the parallel library to + * break to independent I/O and this isn't allowed when + * there are filters in the pipeline. However, in this + * case the parallel library recognizes that the used + * data transform function "x" is the same as not applying + * the transform function. Therefore it does not apply + * the transform function resulting in not breaking to + * independent I/O. 
+ * + * Programmer: Jan-Willem Blokland + * 08/20/2021 + */ +static void +test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared transformed and filtered chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS / + (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = 
(hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
+ (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
+
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data)
+ HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * the dataset has 3 dimensions and each process writes
+ * to its own "page" in the 3rd dimension. 
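+ *
+ * A hedged sketch of the per-rank selection (the names here are
+ * illustrative, not the test's macros): with chunk depth 1, rank
+ * r claims the whole 2D page at depth index r, so no chunk is
+ * ever shared between ranks:
+ *
+ *   hsize_t start[3]  = { 0, 0, (hsize_t)mpi_rank };
+ *   hsize_t stride[3] = { ch_nrows, ch_ncols, 1 };
+ *   hsize_t count[3]  = { nrows / ch_nrows, ncols / ch_ncols, 1 };
+ *   hsize_t block[3]  = { ch_nrows, ch_ncols, 1 };
+ *   H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride,
+ *                       count, block);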
+ * + * Programmer: Jordan Henderson + * 02/06/2017 + */ +static void +test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + dataset_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + sel_dims[2] = 1; + + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / + (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + count[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / + (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + count[2] = 1; + 
stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = 0; + start[1] = 0; + start[2] = (hsize_t)mpi_rank; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE + ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], + start[2], block[0], block[1], block[2]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size)); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * the dataset has 3 dimensions and each process writes + * to each "page" in the 3rd dimension. However, no chunk + * on a given "page" is written to by more than one process. 
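+ *
+ * In outline (illustrative names; the test's macros carry the
+ * real values below), rank r claims one chunk-height band of
+ * rows and extends that band through every page:
+ *
+ *   start[0] = (hsize_t)mpi_rank * ch_nrows;  // rank's row band
+ *   start[1] = 0;
+ *   start[2] = 0;
+ *   block[2] = 1;
+ *   count[2] = (hsize_t)mpi_size;  // the test sizes the depth to mpi_size
+ *
+ * Because the chunks are only one element deep, each chunk still
+ * has exactly one writer, just spread across all of the pages.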
+ * + * Programmer: Jordan Henderson + * 02/06/2017 + */ +static void +test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + dataset_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + sel_dims[2] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + + filespace = + H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / + (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + count[2] = (hsize_t)mpi_size; + stride[0] = 
(hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); + start[1] = 0; + start[2] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE + ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], + start[2], block[0], block[1], block[2]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1]))); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data in the case where + * the dataset has 3 dimensions and each process writes + * to each "page" in the 3rd dimension. Further, each chunk + * in each "page" is written to equally by all processes. 
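+ *
+ * A sketch of the row interleaving (illustrative names; the
+ * macros below carry the real values):
+ *
+ *   start[0]  = (hsize_t)mpi_rank;  // rank's offset within a chunk
+ *   stride[0] = ch_nrows;           // hop from chunk band to chunk band
+ *   block[0]  = 1;                  // one row per chunk
+ *   count[0]  = nrows / ch_nrows;   // visit every chunk band
+ *
+ * Each chunk therefore receives one row from every rank, so the
+ * ranks must cooperate on every chunk's filter pipeline.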
+ * + * Programmer: Jordan Henderson + * 02/06/2017 + */ +static void +test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered chunks in 3D dataset"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS; + dataset_dims[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; + chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + chunk_dims[2] = 1; + sel_dims[0] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); + sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS; + sel_dims[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; + + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS); + count[1] = (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS); + count[2] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH; + stride[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + stride[2] = 1; + block[0] = 1; + block[1] = 
(hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + block[2] = 1; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + start[2] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE + ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], + start[2], block[0], block[1], block[2]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add the Column Index */ + correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the Row Index */ + + ((i % (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / + (hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the amount that gets added when a rank moves down to its next + section vertically in the dataset */ + + ((hsize_t)(WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS) * + (i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * + WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data to unshared + * chunks using a compound 
datatype which doesn't + * require a datatype conversion. + * + * Programmer: Jordan Henderson + * 02/10/2017 + */ +static void +test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, + memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype " + "conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; + chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = + H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create the compound type for memory. 
*/ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, + memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + start[0] = 0; + start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + data = (COMPOUND_C_DATATYPE *)HDcalloc( + 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + /* Fill data buffer */ + for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) { + data[i].field1 = (short)GEN_DATA(i); + data[i].field2 = (int)GEN_DATA(i); + data[i].field3 = (long)GEN_DATA(i); + } + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + } + + VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = 
(COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = + H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data to shared + * chunks using a compound datatype which doesn't + * require a datatype conversion. + * + * Programmer: Jordan Henderson + * 02/10/2017 + */ +static void +test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, + memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype " + "conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + + filespace = 
H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = + H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + stride[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + block[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + data = (COMPOUND_C_DATATYPE *)HDcalloc( + 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data)); + VRFY((NULL != data), "HDcalloc succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + /* Fill data buffer */ + for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) { + data[i].field1 = (short)GEN_DATA(i); + data[i].field2 = (int)GEN_DATA(i); + 
data[i].field3 = (long)GEN_DATA(i); + } + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = + (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field2 = + (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field3 = + (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + } + + VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = + H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data to unshared + * chunks using a compound datatype which requires a + * datatype conversion. + * + * NOTE: This test currently should fail for mpi_size > 1 + * because the datatype conversion causes the parallel + * library to break to independent I/O and this isn't + * allowed when there are filters in the pipeline, + * unless there is only one MPI rank. 
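+ *
+ * The forced conversion comes from pairing a native in-memory
+ * compound with a different on-disk layout, roughly (a sketch;
+ * the actual H5Tcreate/H5Tinsert calls appear below):
+ *
+ *   hid_t mt = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ *   H5Tinsert(mt, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1),
+ *             H5T_NATIVE_SHORT);
+ *   hid_t ft = H5Tcreate(H5T_COMPOUND, 32);
+ *   H5Tinsert(ft, "ShortData", 0, H5T_STD_I64BE);  // short -> 64-bit BE
+ *
+ * Writing native shorts into 64-bit big-endian fields requires a
+ * datatype conversion, and that conversion currently cannot be
+ * performed while staying in collective mode.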
+ * + * Programmer: Jordan Henderson + * 02/07/2017 + */ +static void +test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, + filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype " + "conversion"); + + /* Skip for MPI communicator size of 1 */ + if (mpi_size == 1) { + SKIPPED(); + return; + } + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; + chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create the compound type for memory. 
*/
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t)mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
+ }
+
+ /* Ensure that this test currently fails since type conversions break collective mode */
+ H5E_BEGIN_TRY
+ {
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0),
+ "Dataset write failed as expected");
+ }
+ 
H5E_END_TRY; + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + /* Verify that no data was written */ + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + dset_id = + H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel write of filtered data to shared + * chunks using a compound datatype which requires + * a datatype conversion. + * + * NOTE: This test currently should fail for mpi_size > 1 + * because the datatype conversion causes the parallel + * library to break to independent I/O and this isn't + * allowed when there are filters in the pipeline, + * unless there is only one MPI rank. + * + * Programmer: Jordan Henderson + * 02/10/2017 + */ +static void +test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *data = NULL; + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + size_t i, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs( + "Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion"); + + /* Skip for MPI communicator size of 1 */ + if (mpi_size == 1) { + SKIPPED(); + return; + } + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = 
H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; + + filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = + H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + /* Create the compound type for file. 
*/
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *)HDcalloc(
+ 1, (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short)GEN_DATA(i);
+ data[i].field2 = (int)GEN_DATA(i);
+ data[i].field3 = (long)GEN_DATA(i);
+ }
+
+ /* Ensure that this test currently fails since type conversions break collective mode */
+ H5E_BEGIN_TRY
+ {
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0),
+ "Dataset write failed as expected");
+ }
+ H5E_END_TRY;
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
+ if (data)
+ HDfree(data);
+
+ /* Verify that no data was written */
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset 
read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} +#endif + +/* + * Tests parallel read of filtered data in the special + * case where a dataset is composed of a single chunk. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * the singular chunk and contributes its piece to a + * global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/14/2018 + */ +static void +test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t start[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t stride[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from one-chunk filtered dataset"); + + dataset_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = ((C_DATATYPE)i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)) + + ((C_DATATYPE)i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * + READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS)); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL); + 
VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS; + block[0] = sel_dims[0]; + block[1] = sel_dims[1]; + start[0] = ((hsize_t)mpi_rank * sel_dims[0]); + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != 
global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)flat_dims[0]; + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where only + * one process is reading from a particular chunk in the operation. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * the dataset and contributes its piece to a global buffer + * that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from unshared filtered chunks"); + + dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS * + (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + + if (MAINPROCESS) { + plist_id = 
H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and reads + * it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]); + start[1] = 0; + + if (VERBOSE_MED) 
{ + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)flat_dims[0]; + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * more than one process is reading from a particular chunk + * in the operation. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each chunk of the dataset and contributes its pieces + * to a global buffer that is checked for consistency. 
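+ * With chunk rows of READ_SHARED_FILTERED_CHUNKS_CH_NROWS and mpi_size ranks,
+ * each rank reads a band of READ_SHARED_FILTERED_CHUNKS_CH_NROWS / mpi_size
+ * consecutive rows from every chunk, so all ranks touch every chunk.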
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from shared filtered chunks"); + + dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = + (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + 
verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + count[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset. 
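+ * In each round, every rank contributes one dataset_dims[1]-element piece from
+ * its read buffer, and rank i's piece lands at displacement i * dataset_dims[1]
+ * within that round's segment of the global buffer (see recvcounts/displs below).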
+ */ + { + size_t loop_count = count[0]; + size_t total_recvcounts = 0; + + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[i]; + } + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * dataset_dims[1]); + + for (; loop_count; loop_count--) { + VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], + recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(count[0] - loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * a single process in the read operation has no selection + * in the dataset's dataspace. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank (except for one) + * reads a part of the dataset and contributes its piece + * to a global buffer that is checked for consistency. 
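+ * The rank with no selection still participates in the collective read: it
+ * calls H5Sselect_none() on the file dataspace and passes a NULL read buffer.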
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + size_t segment_length; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from filtered chunks with a single process having no selection"); + + dataset_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + + /* Compute the correct offset into the buffer for the process having no selection and clear it */ + segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t)mpi_size; + HDmemset(correct_buf + ((size_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length), + 0, segment_length * sizeof(*correct_buf)); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = + H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, 
READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + sel_dims[0] = sel_dims[1] = 0; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / + (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + stride[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + block[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + else + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) { + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0), + "Dataset read succeeded"); 
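+ /* A NULL read buffer is allowed here because this rank selected no elements; for this rank the read is a no-op */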
+ } + else { + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + } + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * + READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS); + recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0; + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * (size_t)(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * + READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS)); + + if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, + C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + else + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, + recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * no process in the read operation has a selection in the + * dataset's dataspace. This test is to ensure that there + * are no assertion failures or similar issues due to size + * 0 allocations and the like. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will simply issue + * a no-op read. 
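+ * Even though every selection is empty, all ranks still make the H5Dread()
+ * call, since a collective transfer requires participation from every rank.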
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + size_t read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing read from filtered chunks with all processes having no selection"); + + dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 
0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = sel_dims[1] = 0; + + memspace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded"); + + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data by using point + * selections instead of hyperslab selections. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will read part + * of the dataset using a point selection and will + * contribute its piece to a global buffer that is + * checked for consistency. + * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t *coords = NULL; + hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, j, read_buf_size, correct_buf_size; + size_t num_points; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from filtered chunks with point selection"); + + dataset_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = + (C_DATATYPE)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + 
VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Set up point selection */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + num_points = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * + (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t)mpi_size; + coords = (hsize_t *)HDcalloc(1, 2 * num_points * sizeof(*coords)); + VRFY((NULL != coords), "Coords HDcalloc succeeded"); + + for (i = 0; i < num_points; i++) + for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++) + coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] = + (j > 0) ? 
(i % (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS) + : ((hsize_t)mpi_rank + + ((hsize_t)mpi_size * (i / (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS))); + + VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0), + "Point selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset. + */ + { + size_t original_loop_count = dataset_dims[0] / (hsize_t)mpi_size; + size_t cur_loop_count = original_loop_count; + size_t total_recvcounts = 0; + + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[i]; + } + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * dataset_dims[1]); + + for (; cur_loop_count; cur_loop_count--) { + VRFY((MPI_SUCCESS == + MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], + recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + HDfree(coords); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * each process reads an equal amount of data from each + * chunk in the dataset. Each chunk is distributed among the + * processes in round-robin fashion by blocks of size 1 until + * the whole chunk is selected, leading to an interleaved + * read pattern. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank will read part + * of each chunk of the dataset and will contribute its + * pieces to a global buffer that is checked for consistency. 
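+ * Concretely, with a block size of 1 and start[0] = mpi_rank, rank i reads
+ * row i of every chunk, so adjacent rows of a chunk come from different ranks.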
+ * + * Programmer: Jordan Henderson + * 05/15/2018 + */ +static void +test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t start[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t stride[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing interleaved read from filtered chunks"); + + dataset_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NROWS; + dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add Column Index */ + correct_buf[i] = + (C_DATATYPE)((i % (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + + /* Add the Row Index */ + + ((i % (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / + (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS) + + /* Add the amount that gets added when a rank moves down to its next section + vertically in the dataset */ + + ((hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS * + (i / (hsize_t)(mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; + chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + 
verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size); + sel_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = + (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS); + count[1] = + (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS); + stride[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS; + stride[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* + * Since these chunks are shared, run multiple rounds of MPI_Allgatherv + * to collect all of the pieces into their appropriate locations. The + * number of times MPI_Allgatherv is run should be equal to the number + * of chunks in the first dimension of the dataset. 
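+ * As in the shared-chunk read test above, each round gathers one
+ * dataset_dims[1]-element piece per rank into the global buffer.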
+ */ + { + size_t loop_count = count[0]; + size_t total_recvcounts = 0; + + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)dataset_dims[1]; + total_recvcounts += (size_t)recvcounts[i]; + } + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * dataset_dims[1]); + + for (; loop_count; loop_count--) { + VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], + recvcounts[mpi_rank], C_DATATYPE_MPI, + &global_buf[(count[0] - loop_count) * total_recvcounts], + recvcounts, displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + } + } + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * its own "page" in the 3rd dimension. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads its own "page" + * of the dataset and contributes its piece to a global buffer + * that is checked for consistency. 
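+ * Because the chunk depth is 1 and each rank selects the page at
+ * start[2] == mpi_rank, no chunk is shared between ranks.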
+ * + * Programmer: Jordan Henderson + * 05/16/2018 + */ +static void +test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + MPI_Datatype vector_type; + MPI_Datatype resized_vector_type; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset"); + + dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size)); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = + H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY( + (H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + 
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS; + sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS; + sel_dims[2] = 1; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / + (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / + (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + count[2] = 1; + stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS; + block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = 0; + start[1] = 0; + start[2] = (hsize_t)mpi_rank; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* + * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each + * rank to write to the nth position of the global data buffer, where n is the rank number. 
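+ * For example, with mpi_size == 3, rank 1's elements land at indices
+ * 1, 4, 7, ... of global_buf (unit blocks at stride mpi_size, with the
+ * extent resized below to a single element).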
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int)flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type to allow interleaving,
+ * so make it only one MPI_LONG wide
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)),
+ "MPI_Type_create_resized succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm)),
+ "MPI_Allgather succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of transformed and filtered data in the
+ * case where only one process is reading from a particular
+ * chunk in the operation. Normally, a data transform function
+ * causes the parallel library to break to independent I/O, which
+ * is not allowed when there are filters in the pipeline. In this
+ * case, however, the parallel library recognizes that the data
+ * transform function "x" is the identity, skips applying it, and
+ * therefore does not break to independent I/O.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
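+ * (A non-identity transform such as "x + 1" would still force the break
+ * to independent I/O and so could not be used with filters applied.)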
+ * + * Programmer: Jan-Willem Blokland + * 08/20/2021 + */ +static void +test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from unshared transformed and filtered chunks"); + + dataset_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS * + (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) + + (i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1]))); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = + H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY( + (H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, 
DATASET_JUST_CREATED);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and reads
+ * it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS /
+ (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
+
+ /* Set data transform expression */
+ VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, 
read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)flat_dims[0]; + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * each "page" in the 3rd dimension. However, no chunk on a + * given "page" is read from by more than one process. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each "page" of the dataset and contributes its piece to a + * global buffer that is checked for consistency. 
+ * + * Programmer: Jordan Henderson + * 05/16/2018 + */ +static void +test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset"); + + dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS; + dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1]))); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = + H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= + 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, + HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, 
DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + sel_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS; + sel_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / + (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + count[2] = (hsize_t)mpi_size; + stride[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + stride[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS; + block[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS; + block[2] = 1; + start[0] = ((hsize_t)mpi_rank * (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]); + start[1] = 0; + start[2] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)flat_dims[0]; 
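+ /* All pieces are the same size, so rank i's piece is placed at element
+ * offset i * flat_dims[0] of the global buffer below */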
+ + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0]); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data in the case where + * the dataset has 3 dimensions and each process reads from + * each "page" in the 3rd dimension. Further, each chunk in + * each "page" is read from equally by all processes. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads part of each + * chunk of each "page" and contributes its pieces to a + * global buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/16/2018 + */ +static void +test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + MPI_Datatype vector_type; + MPI_Datatype resized_vector_type; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing read from shared filtered chunks in 3D dataset"); + + dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NROWS; + dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS; + dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + /* Add the Column Index */ + correct_buf[i] = (C_DATATYPE)((i % (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the Row Index */ + + ((i % (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) / + (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + 
READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)) + + /* Add the amount that gets added when a rank moves down to its next + section vertically in the dataset */ + + ((hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS) * + (i / (hsize_t)(mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * + READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS; + chunk_dims[2] = 1; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size); + sel_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS; + sel_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + 
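+ * (each rank takes the single row at offset mpi_rank within every chunk:
+ * block height 1 at stride READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS,
+ * across all columns and pages)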
*/
+ count[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = 1;
+ block[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ {
+ size_t run_length =
+ (size_t)(READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
+ size_t num_blocks = (size_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+
+ /*
+ * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
+ * rank to write to the nth position of the global data buffer, where n is the rank number.
+ */
+ VRFY(
+ (MPI_SUCCESS == MPI_Type_vector((int)num_blocks, (int)run_length,
+ (int)(mpi_size * (int)run_length), C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type to allow interleaving,
+ * so make it "run_length" MPI_LONGs wide
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint)(run_length * sizeof(long)),
+ &resized_vector_type)),
+ "MPI_Type_create_resized succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int)flat_dims[0], C_DATATYPE_MPI, global_buf, 1,
+ resized_vector_type, comm)),
+ "MPI_Allgather succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf)
+ HDfree(global_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * the dataset and contributes its piece to a global + * buffer that is checked for consistency. + * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, + memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype " + "conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + } + + /* Create the compound type for memory. 
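+ * All three fields are inserted with native types at their HOFFSET
+ * positions, so the memory type matches COMPOUND_C_DATATYPE exactly and
+ * the read needs no datatype conversion.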
*/ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, + memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = + H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the 
file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS; + start[0] = 0; + start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, + global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data from shared + * chunks using a compound datatype which doesn't + * require a datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each chunk of the dataset and contributes its piece + * to a global buffer that is checked for consistency. 
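+ * The pieces are exchanged as raw bytes (MPI_BYTE, with counts scaled by
+ * sizeof(COMPOUND_C_DATATYPE)) since the compound type has no predefined
+ * MPI equivalent.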
+ * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID, + memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype " + "conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = + (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field2 = + (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field3 = + (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + } + + /* Create the compound type for memory. 
*/ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, + memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = + H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* 
Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; + stride[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + block[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, + global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data from unshared + * chunks using a compound datatype which requires a + * datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * the dataset and contributes its piece to a global + * buffer that is checked for consistency. 
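+ * Since the file type stores each field as H5T_STD_I64BE, reading back
+ * through the native memory type exercises a per-element datatype
+ * conversion.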
+ * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype " + "conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS; + dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = (short)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field2 = (int)((i % dataset_dims[1]) + (i / dataset_dims[1])); + + correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1])); + } + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + /* Create the compound type for file. 
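+ * Each field is stored as an 8-byte big-endian integer (H5T_STD_I64BE),
+ * deliberately different from the native memory layout so that reads
+ * require a conversion.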
*/ + filetype = H5Tcreate(H5T_COMPOUND, 32); + VRFY((filetype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, + filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = + H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + /* + * 
Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; + stride[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS; + start[0] = 0; + start[1] = ((hsize_t)mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, + global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests parallel read of filtered data from shared + * chunks using a compound datatype which requires + * a datatype conversion. + * + * The MAINPROCESS rank will first write out all of the + * data to the dataset. Then, each rank reads a part of + * each chunk of the dataset and contributes its pieces + * to a global buffer that is checked for consistency. 
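+ *
+ * (Same file/memory compound type setup as the unshared variant
+ * above; here the chunks are shared, so every rank's selection
+ * contributes a piece of every chunk.)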
+ * + * Programmer: Jordan Henderson + * 05/17/2018 + */ +static void +test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id, + hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id) +{ + COMPOUND_C_DATATYPE *read_buf = NULL; + COMPOUND_C_DATATYPE *correct_buf = NULL; + COMPOUND_C_DATATYPE *global_buf = NULL; + hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS]; + hsize_t flat_dims[1]; + size_t i, read_buf_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs( + "Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion"); + + /* SZIP and ScaleOffset filters don't support compound types */ + if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS; + dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS; + + /* Setup the buffer for writing and for comparison */ + correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf); + + correct_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) { + correct_buf[i].field1 = + (short)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field2 = + (int)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + + correct_buf[i].field3 = + (long)((dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) + + (((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])); + } + + /* Create the compound type for memory. */ + memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE)); + VRFY((memtype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0), + "Datatype insertion succeeded"); + VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0), + "Datatype insertion succeeded"); + + /* Create the compound type for file. 
*/ + filetype = H5Tcreate(H5T_COMPOUND, 32); + VRFY((filetype >= 0), "Datatype creation succeeded"); + + VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; + chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, + chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, + filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = + H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + sel_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; + + /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */ + flat_dims[0] = sel_dims[0] * sel_dims[1]; + + memspace = H5Screate_simple(1, flat_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval 
succeeded"); + + /* + * Each process defines the dataset selection in the file and + * reads it to the selection in memory + */ + count[0] = 1; + count[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; + stride[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS; + stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + block[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size; + block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is reading with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + read_buf_size = flat_dims[0] * sizeof(*read_buf); + + read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded"); + + global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != global_buf), "HDcalloc succeeded"); + + /* Collect each piece of data from all ranks into a global buffer on all ranks */ + recvcounts = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + recvcounts[i] = (int)(flat_dims[0] * sizeof(*read_buf)); + + displs = (int *)HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) + displs[i] = (int)(i * flat_dims[0] * sizeof(*read_buf)); + + VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int)(flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, + global_buf, recvcounts, displs, MPI_BYTE, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (global_buf) + HDfree(global_buf); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests write of filtered data to a dataset + * by a single process. After the write has + * succeeded, the dataset is closed and then + * re-opened in parallel and read by all + * processes to ensure data correctness. 
+ * + * Programmer: Jordan Henderson + * 08/03/2017 + */ +static void +test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write file serially; read file in parallel"); + + dataset_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS; + dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH; + + /* Write the file on the MAINPROCESS rank */ + if (MAINPROCESS) { + /* Set up file access property list */ + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS; + chunk_dims[2] = 1; + + filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), 
"HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (long)i; + + /* All ranks open the file and verify their "portion" of the dataset is correct */ + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + dset_id = H5Dopen2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + if (correct_buf) + HDfree(correct_buf); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES +/* + * Tests parallel write of filtered data + * to a dataset. After the write has + * succeeded, the dataset is closed and + * then re-opened and read by a single + * process to ensure data correctness. + * + * Programmer: Jordan Henderson + * 08/03/2017 + */ +static void +test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, + hid_t dcpl_id, hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t count[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS]; + size_t i, data_size, correct_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write file in parallel; read serially"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NROWS; + dataset_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS; + dataset_dims[2] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_DEPTH; + chunk_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + chunk_dims[2] = 1; + sel_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS; + sel_dims[2] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_DEPTH; + + filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create 
chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + count[2] = (hsize_t)mpi_size; + stride[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + stride[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + stride[2] = 1; + block[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS; + block[1] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NCOLS; + block[2] = 1; + offset[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]); + offset[1] = 0; + offset[2] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ], stride[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE " ], offset[ %" PRIuHSIZE + ", %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE + " ]\n", + mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], + offset[1], offset[2], block[0], block[1], block[2]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + if (data) + HDfree(data); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + if (MAINPROCESS) { + plist_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((plist_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + 
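+        /* MAINPROCESS re-opens the file serially (the FAPL above has no
+         * MPI-IO driver set) and reads the whole dataset back to verify
+         * the parallel write */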
+ dset_id = H5Dopen2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf); + + correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) + correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) + + (i / (dataset_dims[0] * dataset_dims[1]))); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + HDfree(correct_buf); + HDfree(read_buf); + } + + return; +} + +/* + * Tests that causing chunks to continually grow and shrink + * by writing random data followed by zeroed-out data (and + * thus controlling the compression ratio) does not cause + * problems. + * + * Programmer: Jordan Henderson + * 06/04/2018 + */ +static void +test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + double *data = NULL; + double *read_buf = NULL; + hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing continually shrinking/growing chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS; + + filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL); + VRFY((memspace >= 0), "Memory dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + 
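+    /* The dataset uses native doubles so that alternating between
+     * random data and all-zero data changes how well each chunk
+     * compresses, forcing the filtered chunks to repeatedly grow
+     * and shrink on disk */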
+    dset_id = H5Dcreate2(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+                         H5P_DEFAULT, plist_id, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+    /* Verify space allocation status */
+    verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+    /*
+     * Each process defines the dataset selection in memory and writes
+     * it to the hyperslab in the file
+     */
+    count[0] = 1;
+    count[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+    stride[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+    stride[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+    block[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS;
+    block[1] = (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+    start[0] = ((hsize_t)mpi_rank * (hsize_t)SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
+    start[1] = 0;
+
+    if (VERBOSE_MED) {
+        HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+                 ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+                 ", %" PRIuHSIZE " ]\n",
+                 mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+        HDfflush(stdout);
+    }
+
+    /* Select hyperslab in the file */
+    filespace = H5Dget_space(dset_id);
+    VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+    VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+         "Hyperslab selection succeeded");
+
+    data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
+
+    data = (double *)HDcalloc(1, data_size);
+    VRFY((NULL != data), "HDcalloc succeeded");
+
+    read_buf = (double *)HDcalloc(1, data_size);
+    VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+    for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+        /* Continually write random double data, followed by zeroed-out data */
+        if (i % 2)
+            HDmemset(data, 0, data_size);
+        else {
+            size_t j;
+            for (j = 0; j < data_size / sizeof(*data); j++) {
+                data[j] = ((double)rand() / (double)RAND_MAX);
+            }
+        }
+
+        VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, data) >= 0),
+             "Dataset write succeeded");
+
+        /* Verify space allocation status */
+        verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+        if (i % 2) {
+            HDmemset(read_buf, 255, data_size);
+        }
+        else {
+            HDmemset(read_buf, 0, data_size);
+        }
+
+        VRFY((H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, read_buf) >= 0),
+             "Dataset read succeeded");
+
+        VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+    }
+
+    if (read_buf)
+        HDfree(read_buf);
+    if (data)
+        HDfree(data);
+
+    VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+    VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+    VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+    VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+    return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when only one MPI
+ * rank writes to a particular partial edge chunk in the dataset.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
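+ * (A partial edge chunk is one that extends past the dataset's
+ * boundary because the dataset dimensions are not an exact multiple
+ * of the chunk dimensions.)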
+ * Each MPI rank selects a hyperslab in the shape of a single chunk + * that is offset to cover the whole edge chunk and part of the + * full chunk next to the edge chunk. + */ +static void +test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to unshared filtered edge chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + + filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS); + start[1] = + (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], 
block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Repeat the previous, but set option to not filter partial edge chunks */ + if (MAINPROCESS) + HDputs("Testing write to unshared unfiltered edge chunks"); + + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); + + dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS); + start[1] = + (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, 
dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests that filtered and unfiltered partial edge chunks can be + * written to and read from correctly in parallel when every MPI + * rank writes to every partial edge chunk in the dataset. + * + * The dataset contains partial edge chunks in the second dimension. + * Each MPI rank selects a hyperslab in the shape of one row of each + * chunk that is offset in the second dimension to cover the whole + * edge chunk and part of the full chunk next to the edge chunk. + */ +static void +test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t chunk_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t sel_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t start[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t stride[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t count[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + hsize_t block[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS]; + size_t i, data_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing write to shared filtered edge chunks"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS; + dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS; + chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + + filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), + 
"Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = + (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS); + count[1] = 1; + stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)1; + block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = + (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + read_buf = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Repeat the previous, but set option to not filter partial edge chunks */ + if (MAINPROCESS) + HDputs("Testing write to shared unfiltered edge chunks"); + + H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS); + + dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME, + filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Each process defines the dataset 
selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = + (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS); + count[1] = 1; + stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS; + stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + block[0] = (hsize_t)1; + block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS; + start[0] = (hsize_t)mpi_rank; + start[1] = + (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS); + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify the correct data was written */ + dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + HDmemset(read_buf, 255, data_size); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded"); + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests that filtered and unfiltered partial edge chunks can be + * written to and read from correctly in parallel when only one + * MPI rank writes to a particular edge chunk in the dataset and + * only performs a partial write to the edge chunk. + * + * The dataset contains partial edge chunks in the second dimension. + * Each MPI rank selects a hyperslab in the shape of part of a single + * edge chunk and writes to just a portion of the edge chunk. + */ +static void +test_edge_chunks_partial_write(const char H5_ATTR_PARALLEL_UNUSED *parent_group, + H5Z_filter_t H5_ATTR_PARALLEL_UNUSED filter_id, + hid_t H5_ATTR_PARALLEL_UNUSED fapl_id, hid_t H5_ATTR_PARALLEL_UNUSED dcpl_id, + hid_t H5_ATTR_PARALLEL_UNUSED dxpl_id) +{ + /* TODO */ +} + +/* + * Tests that the parallel compression feature correctly handles + * writing fill values to a dataset and reading fill values from + * unallocated parts of a dataset. 
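+ *
+ * (The test is run twice: once with the DCPL's default fill time,
+ * and once with the fill time explicitly set to H5D_FILL_TIME_ALLOC.)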
+ */ +static void +test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *correct_buf = NULL; + C_DATATYPE fill_value; + hsize_t dataset_dims[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t chunk_dims[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t sel_dims[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t start[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t stride[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t count[FILL_VALUES_TEST_DATASET_DIMS]; + hsize_t block[FILL_VALUES_TEST_DATASET_DIMS]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing fill values"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)FILL_VALUES_TEST_NROWS; + dataset_dims[1] = (hsize_t)FILL_VALUES_TEST_NCOLS; + chunk_dims[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS; + chunk_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + filespace = H5Screate_simple(FILL_VALUES_TEST_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Set a fill value */ + fill_value = FILL_VALUES_TEST_FILL_VAL; + VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set"); + + dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, + plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Allocate buffer for reading entire dataset */ + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + + read_buf = HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + correct_buf = HDcalloc(1, read_buf_size); + VRFY((NULL != correct_buf), "HDcalloc succeeded"); + + /* Read entire dataset and verify that the fill value is returned */ + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + correct_buf[i] = FILL_VALUES_TEST_FILL_VAL; + + VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + + /* + * Write to part of the first chunk in the dataset with + * all ranks, then read the whole dataset and ensure that + * the fill value is returned for the unwritten part of + * the chunk, as well as for the rest of the dataset that + * hasn't been written to yet. 
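+     * Each rank writes a single row of (chunk width - 1) elements, so
+     * the last column of the written-to chunks should still hold the
+     * fill value afterwards.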
+ */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1); + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* + * Each MPI rank communicates their written piece of data + * into each other rank's correctness-checking buffer + */ + recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(count[1] * block[1]); + displs[i] = (int)(i * dataset_dims[1]); + } + + VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + + /* + * Write to whole dataset and ensure fill value isn't returned + * after reading whole dataset back + */ + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS; + count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + 
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /******************************************************************** + * Set the fill time to H5D_FILL_TIME_ALLOC and repeat the previous * + ********************************************************************/ + + VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) >= 0), "H5Pset_fill_time succeeded"); + + dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, + plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Read entire dataset and verify that the fill value is returned */ + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + correct_buf[i] = FILL_VALUES_TEST_FILL_VAL; + + VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + + /* + * Write to part of the first chunk in the dataset with + * all ranks, then read the whole dataset and ensure that + * the fill value is returned for the unwritten part of + * the chunk, as well as for the rest of the dataset that + * hasn't been written to yet. 
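+     * With H5D_FILL_TIME_ALLOC, fill values are written when chunk storage
+     * is allocated, so the unwritten regions should still read back as the
+     * fill value.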
+ */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1); + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(count[1] * block[1]); + displs[i] = (int)(i * dataset_dims[1]); + } + + /* + * Each MPI rank communicates their written piece of data + * into each other rank's correctness-checking buffer + */ + VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded"); + + /* + * Write to whole dataset and ensure fill value isn't returned + * after reading whole dataset back + */ + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS; + count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify 
correct data was written */ + dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + if (correct_buf) + HDfree(correct_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests that the parallel compression feature can handle + * an undefined fill value. Nothing is verified in this + * test since the fill value isn't defined. + */ +static void +test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + H5D_alloc_time_t alloc_time; + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + hsize_t dataset_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t chunk_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t sel_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t start[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t stride[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t count[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + hsize_t block[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + + if (MAINPROCESS) + HDputs("Testing undefined fill value"); + + VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded"); + + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "Test file open succeeded"); + + group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gopen2 succeeded"); + + /* Create the dataspace for the dataset */ + dataset_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS; + dataset_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS; + chunk_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS; + chunk_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS; + sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR; + sel_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR; + + filespace = H5Screate_simple(FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, dataset_dims, NULL); + VRFY((filespace >= 0), "File dataspace creation succeeded"); + + /* Create chunked dataset */ + plist_id = H5Pcopy(dcpl_id); + VRFY((plist_id >= 0), "DCPL copy succeeded"); + + VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set"); + + /* Add test filter to the pipeline */ + VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set"); + + /* Set an undefined fill value */ + VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, NULL) >= 0), "Fill Value set"); + + dset_id = H5Dcreate2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, + H5P_DEFAULT, plist_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation 
succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED); + + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + + /* Allocate buffer for reading entire dataset */ + read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf); + + read_buf = HDcalloc(1, read_buf_size); + VRFY((NULL != read_buf), "HDcalloc succeeded"); + + /* + * Read entire dataset - nothing to verify since there's no fill value. + * If not using early space allocation, the read should fail since storage + * isn't allocated yet and no fill value is defined. + */ + if (alloc_time == H5D_ALLOC_TIME_EARLY) { + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + } + else { + H5E_BEGIN_TRY + { + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) < 0), + "Dataset read succeeded"); + } + H5E_END_TRY; + } + + /* + * Write to part of the first chunk in the dataset with + * all ranks, then read the whole dataset. Don't verify + * anything since there's no fill value defined. + */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)(FILL_VALUE_UNDEFINED_TEST_CH_NCOLS - 1); + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* + * Write to whole dataset and ensure data is correct + * after reading whole dataset back + */ + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS; + count[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS; + stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS; + block[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS; + start[0] = 
(hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} + +/* + * Tests that the parallel compression feature correctly handles + * avoiding writing fill values to a dataset when the fill time + * is set as H5D_FILL_TIME_NEVER. + */ +static void +test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id, + hid_t dxpl_id) +{ + C_DATATYPE *data = NULL; + C_DATATYPE *read_buf = NULL; + C_DATATYPE *fill_buf = NULL; + C_DATATYPE fill_value; + hsize_t dataset_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t chunk_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t sel_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t start[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t stride[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t count[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + hsize_t block[FILL_TIME_NEVER_TEST_DATASET_DIMS]; + size_t i, data_size, read_buf_size; + hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t filespace = H5I_INVALID_HID; + int *recvcounts = NULL; + int *displs = NULL; + + if (MAINPROCESS) + HDputs("Testing fill time H5D_FILL_TIME_NEVER"); + + /* + * Only run this test when incremental file space allocation is + * used, as HDF5's chunk allocation code always writes fill values + * when filters are in the pipeline, but parallel compression does + * incremental file space allocation differently. 
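+     * (With early or late allocation, the chunks would therefore already
+     * contain fill values despite H5D_FILL_TIME_NEVER, defeating the
+     * checks below.)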
+     */
+    {
+        H5D_alloc_time_t alloc_time;
+
+        VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+
+        if (alloc_time != H5D_ALLOC_TIME_INCR) {
+            if (MAINPROCESS)
+                SKIPPED();
+            return;
+        }
+    }
+
+    file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+    VRFY((file_id >= 0), "Test file open succeeded");
+
+    group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+    VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+    /* Create the dataspace for the dataset */
+    dataset_dims[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS;
+    dataset_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS;
+    chunk_dims[0]   = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+    chunk_dims[1]   = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+    sel_dims[0]     = (hsize_t)DIM0_SCALE_FACTOR;
+    sel_dims[1]     = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+    filespace = H5Screate_simple(FILL_TIME_NEVER_TEST_DATASET_DIMS, dataset_dims, NULL);
+    VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+    /* Create chunked dataset */
+    plist_id = H5Pcopy(dcpl_id);
+    VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+    VRFY((H5Pset_chunk(plist_id, FILL_TIME_NEVER_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+    /* Add test filter to the pipeline */
+    VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+    /* Set a fill value */
+    fill_value = FILL_TIME_NEVER_TEST_FILL_VAL;
+    VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
+
+    /* Set fill time of 'never' */
+    VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded");
+
+    dset_id = H5Dcreate2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+                         H5P_DEFAULT, plist_id, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+    /* Verify space allocation status */
+    verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+    /* Allocate buffer for reading entire dataset */
+    read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+    read_buf = HDcalloc(1, read_buf_size);
+    VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+    fill_buf = HDcalloc(1, read_buf_size);
+    VRFY((NULL != fill_buf), "HDcalloc succeeded");
+
+    /* Read entire dataset and verify that the fill value isn't returned */
+    VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+         "Dataset read succeeded");
+
+    for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+        fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
+
+    /*
+     * It should be very unlikely for the dataset's random
+     * values to all be the fill value, so this should be
+     * a safe comparison in theory.
+     */
+    VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+
+    /*
+     * Write to part of the first chunk in the dataset with
+     * all ranks, then read the whole dataset and ensure that
+     * the fill value isn't returned for the unwritten part of
+     * the chunk, as well as for the rest of the dataset that
+     * hasn't been written to yet.
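+     * Since the fill time is H5D_FILL_TIME_NEVER, those unwritten regions
+     * should contain arbitrary data rather than the fill value.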
+ */ + count[0] = 1; + count[1] = 1; + stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS; + block[0] = 1; + block[1] = (hsize_t)(FILL_TIME_NEVER_TEST_CH_NCOLS - 1); + start[0] = (hsize_t)mpi_rank; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + /* Select hyperslab in the file */ + filespace = H5Dget_space(dset_id); + VRFY((filespace >= 0), "File dataspace retrieval succeeded"); + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + /* Fill data buffer */ + data_size = sel_dims[0] * sel_dims[1] * sizeof(*data); + + data = (C_DATATYPE *)HDcalloc(1, data_size); + VRFY((NULL != data), "HDcalloc succeeded"); + + for (i = 0; i < data_size / sizeof(*data); i++) + data[i] = (C_DATATYPE)GEN_DATA(i); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + /* + * Each MPI rank communicates their written piece of data + * into each other rank's correctness-checking buffer + */ + recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts)); + VRFY((NULL != recvcounts), "HDcalloc succeeded"); + + displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs)); + VRFY((NULL != displs), "HDcalloc succeeded"); + + for (i = 0; i < (size_t)mpi_size; i++) { + recvcounts[i] = (int)(count[1] * block[1]); + displs[i] = (int)(i * dataset_dims[1]); + } + + VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, recvcounts, + displs, C_DATATYPE_MPI, comm)), + "MPI_Allgatherv succeeded"); + + /* + * It should be very unlikely for the dataset's random + * values to all be the fill value, so this should be + * a safe comparison in theory. 
+ */ + VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded"); + + /* + * Write to whole dataset and ensure fill value isn't returned + * after reading whole dataset back + */ + + /* Each process defines the dataset selection in memory and writes + * it to the hyperslab in the file + */ + count[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS; + count[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS; + stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS; + stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS; + block[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS / (hsize_t)mpi_size; + block[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; + + if (VERBOSE_MED) { + HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE + ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE + ", %" PRIuHSIZE " ]\n", + mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]); + HDfflush(stdout); + } + + VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0), + "Hyperslab selection succeeded"); + + VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0), + "Dataset write succeeded"); + + /* Verify space allocation status */ + verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN); + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + + /* Verify correct data was written */ + dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), + "Dataset read succeeded"); + + for (i = 0; i < read_buf_size / sizeof(*read_buf); i++) + VRFY((read_buf[i] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded"); + + if (displs) + HDfree(displs); + if (recvcounts) + HDfree(recvcounts); + if (data) + HDfree(data); + if (read_buf) + HDfree(read_buf); + if (fill_buf) + HDfree(fill_buf); + + VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded"); + VRFY((H5Gclose(group_id) >= 0), "Group close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + return; +} +#endif + +int +main(int argc, char **argv) +{ + size_t cur_filter_idx = 0; + size_t num_filters = 0; + hid_t file_id = H5I_INVALID_HID; + hid_t fcpl_id = H5I_INVALID_HID; + hid_t group_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + int mpi_code; + + /* Initialize MPI */ + MPI_Init(&argc, &argv); + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + if (mpi_size <= 0) { + if (MAINPROCESS) { + HDprintf("The Parallel Filters tests require at least 1 rank.\n"); + HDprintf("Quitting...\n"); + } + + MPI_Abort(MPI_COMM_WORLD, 1); + } + + if (H5dont_atexit() < 0) { + if (MAINPROCESS) { + HDprintf("Failed to turn off atexit processing. 
Continue.\n"); + } + } + + H5open(); + + if (MAINPROCESS) { + HDprintf("==========================\n"); + HDprintf(" Parallel Filters tests\n"); + HDprintf("==========================\n\n"); + } + + if (VERBOSE_MED) + h5_show_hostname(); + + TestAlarmOn(); + + num_filters = ARRAY_SIZE(filterIDs); + + /* Set up file access property list with parallel I/O access, + * collective metadata reads/writes and the latest library + * version bounds */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + VRFY((H5Pset_fapl_mpio(fapl_id, comm, info) >= 0), "Set FAPL MPIO succeeded"); + VRFY((H5Pset_all_coll_metadata_ops(fapl_id, TRUE) >= 0), "H5Pset_all_coll_metadata_ops succeeded"); + VRFY((H5Pset_coll_metadata_write(fapl_id, TRUE) >= 0), "H5Pset_coll_metadata_write succeeded"); + + VRFY((H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0), + "Set libver bounds succeeded"); + + /* + * Set up Paged and Persistent Free Space Management + */ + fcpl_id = H5Pcreate(H5P_FILE_CREATE); + VRFY((fcpl_id >= 0), "FCPL creation succeeded"); + + VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, 1) >= 0), + "H5Pset_file_space_strategy succeeded"); + + VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL), + "Test file name created"); + + file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, fcpl_id, fapl_id); + VRFY((file_id >= 0), "Test file creation succeeded"); + + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + file_id = H5I_INVALID_HID; + + /* Create property list for collective dataset write */ + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DXPL creation succeeded"); + + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Create DCPL for dataset creation */ + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "DCPL creation succeeded"); + + /* Run tests with all available filters */ + for (cur_filter_idx = 0; cur_filter_idx < num_filters; cur_filter_idx++) { + H5FD_mpio_chunk_opt_t chunk_opt; + H5Z_filter_t cur_filter = filterIDs[cur_filter_idx]; + + /* Run tests with both linked-chunk and multi-chunk I/O */ + for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) { + H5D_alloc_time_t space_alloc_time; + + /* Run tests with all available space allocation times */ + for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR; + space_alloc_time++) { + const char *alloc_time; + unsigned filter_config; + htri_t filter_avail; + size_t i; + char group_name[512]; + + switch (space_alloc_time) { + case H5D_ALLOC_TIME_EARLY: + alloc_time = "Early"; + break; + case H5D_ALLOC_TIME_LATE: + alloc_time = "Late"; + break; + case H5D_ALLOC_TIME_INCR: + alloc_time = "Incremental"; + break; + default: + alloc_time = "Unknown"; + } + + if (MAINPROCESS) + HDprintf("== Running tests with filter '%s' using '%s' and '%s' allocation time ==\n\n", + filterNames[cur_filter_idx], + H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? 
"Linked-Chunk I/O" : "Multi-Chunk I/O", + alloc_time); + + /* Make sure current filter is available before testing with it */ + filter_avail = H5Zfilter_avail(cur_filter); + VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded"); + + if (!filter_avail) { + if (MAINPROCESS) + HDprintf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n", + filterNames[cur_filter_idx]); + continue; + } + + /* Get the current filter's info */ + VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded"); + + /* Determine if filter is encode-enabled */ + if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) { + if (MAINPROCESS) + HDprintf(" ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n", + filterNames[cur_filter_idx]); + continue; + } + + /* Set space allocation time */ + VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), "H5Pset_alloc_time succeeded"); + + /* Set chunk I/O optimization method */ + VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0), + "H5Pset_dxpl_mpio_chunk_opt succeeded"); + + /* Create a group to hold all the datasets for this combination + * of filter and chunk optimization mode. Then, close the file + * again since some tests may need to open the file in a special + * way, like on rank 0 only */ + file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + HDsnprintf(group_name, sizeof(group_name), "%s_%s_%s", filterNames[cur_filter_idx], + H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io", + alloc_time); + + group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((group_id >= 0), "H5Gcreate2 succeeded"); + + VRFY((H5Gclose(group_id) >= 0), "H5Gclose failed"); + group_id = H5I_INVALID_HID; + + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + file_id = H5I_INVALID_HID; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + test_func func = tests[i]; + + if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) { + func(group_name, cur_filter, fapl_id, dcpl_id, dxpl_id); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } + } + + if (MAINPROCESS) + HDputs(""); + } + } + } + + VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded"); + dcpl_id = H5I_INVALID_HID; + + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); + dxpl_id = H5I_INVALID_HID; + + if (nerrors) + goto exit; + + if (MAINPROCESS) + HDputs("All Parallel Filters tests passed\n"); + +exit: + if (nerrors) + if (MAINPROCESS) + HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : ""); + + TestAlarmOff(); + + h5_clean_files(FILENAME, fapl_id); + fapl_id = H5I_INVALID_HID; + + if (dcpl_id >= 0) + VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded"); + if (dxpl_id >= 0) + VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded"); + if (fapl_id >= 0) + VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded"); + if (fcpl_id >= 0) + VRFY((H5Pclose(fcpl_id) >= 0), "H5Pclose succeeded"); + if (group_id >= 0) + VRFY((H5Gclose(group_id) >= 0), "H5Gclose succeeded"); + if (file_id >= 0) + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + + H5close(); + + MPI_Finalize(); + + exit((nerrors ? 
EXIT_FAILURE : EXIT_SUCCESS)); +} diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h new file mode 100644 index 0000000..335b43a --- /dev/null +++ b/testpar/t_filters_parallel.h @@ -0,0 +1,496 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Jordan Henderson + * 01/31/2017 + * + * This file contains #defines for tests of the use + * of filters in parallel HDF5, implemented in + * H5Dmpio.c + */ + +#ifndef TEST_PARALLEL_FILTERS_H_ +#define TEST_PARALLEL_FILTERS_H_ + +#include <string.h> + +#include "stdlib.h" +#include "testpar.h" + +#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0]) + +/* Used to load other filters than GZIP */ +/* #define DYNAMIC_FILTER */ /* Uncomment and define the fields below to use a dynamically loaded filter */ + +#ifdef DYNAMIC_FILTER +#define FILTER_NUM_CDVALUES 1 +const unsigned int cd_values[FILTER_NUM_CDVALUES] = {0}; +unsigned int flags = 0; +size_t cd_nelmts = FILTER_NUM_CDVALUES; +#endif + +/* Common defines for all tests */ +#define C_DATATYPE long +#define C_DATATYPE_MPI MPI_LONG +#define COMPOUND_C_DATATYPE cmpd_filtered_t +#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG + +/* Macro used to generate data for datasets for later verification */ +#define GEN_DATA(i) INCREMENTAL_DATA(i) + +/* For experimental purposes only, will cause tests to fail data verification phase - JTH */ +/* #define GEN_DATA(i) RANK_DATA(i) */ /* Given an index value i, generates test data based upon + selected mode */ + +#define INCREMENTAL_DATA(i) ((size_t)mpi_rank + i) /* Generates incremental test data */ +#define RANK_DATA(i) \ + (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */ + +#define DEFAULT_DEFLATE_LEVEL 9 + +#define DIM0_SCALE_FACTOR 4 +#define DIM1_SCALE_FACTOR 2 + +/* Struct type for the compound datatype filtered dataset tests */ +typedef struct { + short field1; + int field2; + long field3; +} COMPOUND_C_DATATYPE; + +/* Defines for the one-chunk filtered dataset write test */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write" +#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2 +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS \ + (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS \ + (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS +#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS + +/* Defines for the unshared filtered chunks write test */ +#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * 
DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size) + +/* Defines for the unshared filtered chunks partial write test */ +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME "unshared_filtered_chunks_partial_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS 2 +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS (DIM1_SCALE_FACTOR) + +/* Defines for the shared filtered chunks write test */ +#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write" +#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + +/* Defines for the unshared filtered chunks w/ single unlim. dimension write test */ +#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_single_unlim_dim_write" +#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS 2 +#define WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS (WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS / mpi_size) +#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS (WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / mpi_size) +#define WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS 5 + +/* Defines for the shared filtered chunks w/ single unlim. dimension write test */ +#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_single_unlim_dim_write" +#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS 2 +#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS (mpi_size) +#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS (mpi_size) +#define WRITE_SHARED_ONE_UNLIM_DIM_NROWS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS * DIM0_SCALE_FACTOR) +#define WRITE_SHARED_ONE_UNLIM_DIM_NCOLS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * DIM1_SCALE_FACTOR) +#define WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS 5 + +/* Defines for the unshared filtered chunks w/ two unlim. dimension write test */ +#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_two_unlim_dim_write" +#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS 2 +#define WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS 5 + +/* Defines for the shared filtered chunks w/ two unlim. 
dimension write test */ +#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_two_unlim_dim_write" +#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS 2 +#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS (mpi_size) +#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS (mpi_size) +#define WRITE_SHARED_TWO_UNLIM_DIM_NROWS (mpi_size) +#define WRITE_SHARED_TWO_UNLIM_DIM_NCOLS (mpi_size) +#define WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS 5 + +/* Defines for the filtered chunks write test where a process has no selection */ +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write" +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS \ + (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS \ + (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1) + +/* Defines for the filtered chunks write test where no process has a selection */ +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write" +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS \ + (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS \ + (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered chunks write test with a point selection */ +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_write" +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS \ + (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS \ + (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered dataset interleaved write test */ +#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write" +#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2 +#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size) +#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR) +#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS \ + (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR) +#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS \ + (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR) + +/* Defines for the unshared transformed and filtered chunks write test */ +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME "unshared_transformed_filtered_chunks_write" +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS \ + 
(WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS / mpi_size) +#define WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS \ + (WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS / mpi_size) + +/* Defines for the 3D unshared filtered dataset separate page write test */ +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \ + "3D_unshared_filtered_chunks_separate_pages_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3 +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS \ + (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS \ + (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size) + +/* Defines for the 3D unshared filtered dataset same page write test */ +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME \ + "3D_unshared_filtered_chunks_same_pages_write" +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3 +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS \ + (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size) +#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS \ + (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size) + +/* Defines for the 3d shared filtered dataset write test */ +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_write" +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3 +#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR) +#define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size) + +/* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */ +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME \ + "compound_unshared_filtered_chunks_no_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC \ + (WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size) + +/* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */ +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME \ + "compound_shared_filtered_chunks_no_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size +#define 
WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC \ + WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS + +/* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */ +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME \ + "compound_unshared_filtered_chunks_type_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC \ + (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size) + +/* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */ +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME \ + "compound_shared_filtered_chunks_type_conversion_write" +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1 +#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC \ + WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS + +/* Defines for the one-chunk filtered dataset read test */ +#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read" +#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2 +#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS \ + (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS \ + (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */ +#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS +#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS + +/* Defines for the unshared filtered chunks read test */ +#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_read" +#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size) +#define READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size) + +/* Defines for the shared filtered chunks read test */ +#define READ_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_read" +#define READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size) +#define READ_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size) +#define READ_SHARED_FILTERED_CHUNKS_NROWS (READ_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR) 
+#define READ_SHARED_FILTERED_CHUNKS_NCOLS (READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + +/* Defines for the filtered chunks read test where a process has no selection */ +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read" +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS \ + (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS \ + (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) +#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1) + +/* Defines for the filtered chunks read test where no process has a selection */ +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read" +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS \ + (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS \ + (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered chunks read test with a point selection */ +#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_read" +#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size) +#define READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size) + +/* Defines for the filtered dataset interleaved read test */ +#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read" +#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2 +#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size) +#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR) +#define INTERLEAVED_READ_FILTERED_DATASET_NROWS \ + (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR) +#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS \ + (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR) + +/* Defines for the unshared transformed and filtered chunks read test */ +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME "unshared_transformed_filtered_chunks_read" +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS 2 +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR) +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR) +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS \ + (READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS / mpi_size) +#define READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS \ + (READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS / mpi_size) + +/* Defines for the 3D unshared filtered dataset separate page read test */ +#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME \ + "3D_unshared_filtered_chunks_separate_pages_read" +#define 
READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS \
+    (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS \
+    (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3D unshared filtered dataset same page read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS \
+    (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS \
+    (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3d shared filtered dataset read test */
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_read"
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NROWS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NCOLS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME \
+    "compound_unshared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+    (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME \
+    "compound_shared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC \
+    READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+
+/* Defines for the compound datatype filtered dataset type conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME \
+    "compound_unshared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC \
+    (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset type conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME \
+    "compound_shared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC \
+    READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+
+/* Defines for the write file serially/read in parallel test */
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3
+#define WRITE_SERIAL_READ_PARALLEL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_SERIAL_READ_PARALLEL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_SERIAL_READ_PARALLEL_DEPTH (mpi_size)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NROWS (WRITE_SERIAL_READ_PARALLEL_NROWS / mpi_size)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NCOLS (WRITE_SERIAL_READ_PARALLEL_NCOLS / mpi_size)
+
+/* Defines for the write file in parallel/read serially test */
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_NAME "write_parallel_read_serial"
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS 3
+#define WRITE_PARALLEL_READ_SERIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_PARALLEL_READ_SERIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_PARALLEL_READ_SERIAL_DEPTH (mpi_size)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / mpi_size)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / mpi_size)
+
+/* Defines for the shrinking/growing chunks test */
+#define SHRINKING_GROWING_CHUNKS_DATASET_NAME "shrink_grow_chunks_test"
+#define SHRINKING_GROWING_CHUNKS_DATASET_DIMS 2
+#define SHRINKING_GROWING_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+
+/* Defines for the unshared filtered edge chunks write test */
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "unshared_filtered_edge_chunks_no_filter_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+    (mpi_size * DIM1_SCALE_FACTOR) + (WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1)
+
+/* Defines for the shared filtered edge chunks write test */
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "shared_filtered_edge_chunks_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "shared_filtered_edge_chunks_no_filter_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS \
+    (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+    ((WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + \
+     (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1))
+
+/* Defines for the fill values test */
+#define FILL_VALUES_TEST_DATASET_NAME "fill_value_test"
+#define FILL_VALUES_TEST_DATASET_NAME2 "fill_value_alloc_test"
+#define FILL_VALUES_TEST_DATASET_DIMS 2
+#define FILL_VALUES_TEST_FILL_VAL (-1)
+#define FILL_VALUES_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUES_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUES_TEST_NROWS (FILL_VALUES_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUES_TEST_NCOLS (FILL_VALUES_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the undefined fill value test */
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_NAME "fill_value_undefined_test"
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS 2
+#define FILL_VALUE_UNDEFINED_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUE_UNDEFINED_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUE_UNDEFINED_TEST_NROWS (FILL_VALUE_UNDEFINED_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUE_UNDEFINED_TEST_NCOLS (FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the fill time of 'never' test */
+#define FILL_TIME_NEVER_TEST_DATASET_NAME "fill_time_never_test"
+#define FILL_TIME_NEVER_TEST_DATASET_DIMS 2
+#define FILL_TIME_NEVER_TEST_FILL_VAL (-1)
+#define FILL_TIME_NEVER_TEST_CH_NROWS (mpi_size)
+#define FILL_TIME_NEVER_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_TIME_NEVER_TEST_NROWS (FILL_TIME_NEVER_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_TIME_NEVER_TEST_NCOLS (FILL_TIME_NEVER_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+#endif /* TEST_PARALLEL_FILTERS_H_ */
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
new file mode 100644
index 0000000..2f2ad61
--- /dev/null
+++ b/testpar/t_init_term.c
@@ -0,0 +1,70 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group.                                               *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * This file is part of HDF5. The full HDF5 copyright notice, including     *
+ * terms governing use, modification, and redistribution, is contained in   *
+ * the COPYING file, which can be found at the root of the source code      *
+ * distribution tree, or in https://www.hdfgroup.org/licenses.              *
+ * If you do not have access to either file, you may request a copy from   *
+ * help@hdfgroup.org.                                                        *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer:  Mohamad Chaarawi
+ *              June 2015
+ *
+ * Purpose:     This test checks for the correct initialization and
+ *              termination of the HDF5 library with MPI init and finalize.
+ */
+
+#include "testphdf5.h"
+
+int nerrors = 0; /* errors count */
+
+const char *FILENAME[] = {"after_mpi_fin", NULL};
+
+int
+main(int argc, char **argv)
+{
+    int      mpi_size, mpi_rank;
+    MPI_Comm comm = MPI_COMM_WORLD;
+
+    /* Initialize and finalize MPI */
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(comm, &mpi_size);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    if (MAINPROCESS)
+        TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
+
+    MPI_Finalize();
+
+    nerrors += GetTestNumErrs();
+
+    /* test if we can initialize the library with MPI being finalized
+       and create a file serially */
+    H5open();
+
+    if (mpi_rank == 0) {
+        char  filename[1024];
+        hid_t file_id;
+
+        h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof filename);
+        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+        VRFY((file_id >= 0), "H5Fcreate succeeded");
+        H5Fclose(file_id);
+        file_id = -1;
+    }
+
+    H5close();
+
+    if (MAINPROCESS) {
+        if (0 == nerrors)
+            PASSED();
+        else
+            H5_FAILED();
+    }
+
+    return (nerrors != 0);
+}
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index f294b93..7cdfecf 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -1,28 +1,26 @@
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  * Copyright by The HDF Group.                                               *
- * Copyright by the Board of Trustees of the University of Illinois.         *
  * All rights reserved.                                                      *
  *                                                                           *
  * This file is part of HDF5. The full HDF5 copyright notice, including     *
  * terms governing use, modification, and redistribution, is contained in   *
- * the files COPYING and Copyright.html. COPYING can be found at the root   *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and  *
- * is linked from the top-level documents page. It can also be found at     *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have          *
- * access to either file, you may request a copy from help@hdfgroup.org.    *
+ * the COPYING file, which can be found at the root of the source code      *
+ * distribution tree, or in https://www.hdfgroup.org/licenses.              *
+ * If you do not have access to either file, you may request a copy from   *
+ * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 #include "testphdf5.h"
+#include "H5Dprivate.h"
+#include "H5private.h"
 
-#define DIM 2
-#define SIZE 32
-#define NDATASET 4
+#define DIM         2
+#define SIZE        32
+#define NDATASET    4
 #define GROUP_DEPTH 128
 
 enum obj_type { is_group, is_dset };
-
-static int get_size(void);
+static int  get_size(void);
 static void write_dataset(hid_t, hid_t, hid_t);
 static int  read_dataset(hid_t, hid_t, hid_t);
 static void create_group_recursive(hid_t, hid_t, hid_t, int);
@@ -33,7 +31,6 @@ static int read_attribute(hid_t, int, int);
 static int  check_value(DATATYPE *, DATATYPE *, int);
 static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
 
-
 /*
  * The size value computed by this function is used extensively in
  * configuring tests for the current number of processes.
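The new t_init_term.c above leans on the test-harness macros (TESTING, VRFY, MAINPROCESS). Stripped down to plain MPI and public HDF5 calls, the behavior it checks is simply that the library can still be opened for serial I/O after MPI_Finalize(). A minimal sketch, assuming a parallel HDF5 build; the output file name is arbitrary:

#include <mpi.h>
#include <hdf5.h>

int
main(int argc, char **argv)
{
    int mpi_rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Finalize(); /* MPI is finished from here on */

    /* Serial HDF5 (default property lists, no MPI-IO driver) still works */
    H5open();
    if (mpi_rank == 0) {
        hid_t file_id = H5Fcreate("after_fin.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        if (file_id >= 0)
            H5Fclose(file_id);
    }
    H5close();

    return 0;
}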
@@ -54,14 +51,11 @@ get_size(void) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - if(mpi_size > size ) { - - if((mpi_size % 2) == 0 ) { - + if (mpi_size > size) { + if ((mpi_size % 2) == 0) { size = mpi_size; - - } else { - + } + else { size = mpi_size + 1; } } @@ -69,7 +63,7 @@ get_size(void) VRFY((mpi_size <= size), "mpi_size <= size"); VRFY(((size % 2) == 0), "size isn't even"); - return(size); + return (size); } /* get_size() */ @@ -77,14 +71,15 @@ get_size(void) * Example of using PHDF5 to create a zero sized dataset. * */ -void zero_dim_dset(void) +void +zero_dim_dset(void) { - int mpi_size, mpi_rank; - const char *filename; - hid_t fid, plist, dcpl, dsid, sid; - hsize_t dim, chunk_dim; - herr_t ret; - int data[1]; + int mpi_size, mpi_rank; + const char *filename; + hid_t fid, plist, dcpl, dsid, sid; + hsize_t dim, chunk_dim; + herr_t ret; + int data[1]; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -92,37 +87,37 @@ void zero_dim_dset(void) filename = GetTestParameters(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((plist>=0), "create_faccess_plist succeeded"); + VRFY((plist >= 0), "create_faccess_plist succeeded"); fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - VRFY((fid>=0), "H5Fcreate succeeded"); + VRFY((fid >= 0), "H5Fcreate succeeded"); ret = H5Pclose(plist); - VRFY((ret>=0), "H5Pclose succeeded"); + VRFY((ret >= 0), "H5Pclose succeeded"); dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl>=0), "failed H5Pcreate"); + VRFY((dcpl >= 0), "failed H5Pcreate"); /* Set 1 chunk size */ chunk_dim = 1; - ret = H5Pset_chunk(dcpl, 1, &chunk_dim); - VRFY((ret>=0), "failed H5Pset_chunk"); + ret = H5Pset_chunk(dcpl, 1, &chunk_dim); + VRFY((ret >= 0), "failed H5Pset_chunk"); /* Create 1D dataspace with 0 dim size */ dim = 0; sid = H5Screate_simple(1, &dim, NULL); - VRFY((sid>=0), "failed H5Screate_simple"); + VRFY((sid >= 0), "failed H5Screate_simple"); /* Create chunked dataset */ dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dsid>=0), "failed H5Dcreate2"); + VRFY((dsid >= 0), "failed H5Dcreate2"); /* write 0 elements from dataset */ ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); - VRFY((ret>=0), "failed H5Dwrite"); + VRFY((ret >= 0), "failed H5Dwrite"); /* Read 0 elements from dataset */ ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); - VRFY((ret>=0), "failed H5Dread"); + VRFY((ret >= 0), "failed H5Dread"); H5Pclose(dcpl); H5Dclose(dsid); @@ -133,81 +128,77 @@ void zero_dim_dset(void) /* * Example of using PHDF5 to create ndatasets datasets. Each process write * a slab of array to the file. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. 
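For reference, the zero-sized-dataset scenario that zero_dim_dset() exercises above reduces to a handful of calls. This sketch uses the default serial driver instead of the test's MPI-IO access list; the file name is arbitrary:

#include <hdf5.h>

/* Create a chunked 1-D dataset with a zero-sized extent, then perform
 * 0-element write and read transfers, which must both succeed. */
static void
zero_sized_dataset(const char *fname)
{
    hsize_t dim = 0, chunk_dim = 1;
    int     data[1];
    hid_t   fid  = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hid_t   sid, dsid;

    H5Pset_chunk(dcpl, 1, &chunk_dim);      /* chunked layout, 1-element chunks */
    sid  = H5Screate_simple(1, &dim, NULL); /* dataspace with 0 elements */
    dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);

    H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data); /* writes nothing */
    H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);  /* reads nothing */

    H5Dclose(dsid);
    H5Sclose(sid);
    H5Pclose(dcpl);
    H5Fclose(fid);
}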
- * - * JRM - 8/11/04 */ -void multiple_dset_write(void) +void +multiple_dset_write(void) { - int i, j, n, mpi_size, mpi_rank, size; - hid_t iof, plist, dataset, memspace, filespace; - hid_t dcpl; /* Dataset creation property list */ - hsize_t chunk_origin [DIM]; - hsize_t chunk_dims [DIM], file_dims [DIM]; - hsize_t count[DIM]={1,1}; - double * outme = NULL; - double fill=1.0; /* Fill value */ - char dname [100]; - herr_t ret; + int i, j, n, mpi_size, mpi_rank, size; + hid_t iof, plist, dataset, memspace, filespace; + hid_t dcpl; /* Dataset creation property list */ + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM]; + hsize_t count[DIM] = {1, 1}; + double *outme = NULL; + double fill = 1.0; /* Fill value */ + char dname[100]; + herr_t ret; const H5Ptest_param_t *pt; - char *filename; - int ndatasets; + char *filename; + int ndatasets; - pt = GetTestParameters(); - filename = pt->name; + pt = GetTestParameters(); + filename = pt->name; ndatasets = pt->count; size = get_size(); + H5_CHECK_OVERFLOW(size, int, size_t); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - outme = HDmalloc((size_t)(size * size * sizeof(double))); + outme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); VRFY((outme != NULL), "HDmalloc succeeded for outme"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - VRFY((plist>=0), "create_faccess_plist succeeded"); + VRFY((plist >= 0), "create_faccess_plist succeeded"); iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); - VRFY((iof>=0), "H5Fcreate succeeded"); + VRFY((iof >= 0), "H5Fcreate succeeded"); ret = H5Pclose(plist); - VRFY((ret>=0), "H5Pclose succeeded"); + VRFY((ret >= 0), "H5Pclose succeeded"); /* decide the hyperslab according to process number. 
*/ get_slab(chunk_origin, chunk_dims, count, file_dims, size); - memspace = H5Screate_simple(DIM, chunk_dims, NULL); + memspace = H5Screate_simple(DIM, chunk_dims, NULL); filespace = H5Screate_simple(DIM, file_dims, NULL); - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); - VRFY((ret>=0), "mdata hyperslab selection"); + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mdata hyperslab selection"); /* Create a dataset creation property list */ dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl>=0), "dataset creation property list succeeded"); + VRFY((dcpl >= 0), "dataset creation property list succeeded"); ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); - VRFY((ret>=0), "set fill-value succeeded"); + VRFY((ret >= 0), "set fill-value succeeded"); - for(n = 0; n < ndatasets; n++) { - sprintf(dname, "dataset %d", n); - dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), dname); + for (n = 0; n < ndatasets; n++) { + HDsnprintf(dname, sizeof(dname), "dataset %d", n); + dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); + VRFY((dataset > 0), dname); - /* calculate data to write */ - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - outme [(i * size) + j] = n*1000 + mpi_rank; + /* calculate data to write */ + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = n * 1000 + mpi_rank; - H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); + H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme); - H5Dclose(dataset); + H5Dclose(dataset); #ifdef BARRIER_CHECKS - if(!((n+1) % 10)) { - printf("created %d datasets\n", n+1); - MPI_Barrier(MPI_COMM_WORLD); - } + if (!((n + 1) % 10)) { + HDprintf("created %d datasets\n", n + 1); + MPI_Barrier(MPI_COMM_WORLD); + } #endif /* BARRIER_CHECKS */ } @@ -219,52 +210,49 @@ void multiple_dset_write(void) HDfree(outme); } - /* Example of using PHDF5 to create, write, and read compact dataset. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. 
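The creation loop above is the core of multiple_dset_write(): one dataset per iteration, each with a bounds-checked generated name. The same loop in isolation, using standard snprintf in place of the HDsnprintf wrapper and taking an already-open file and dataspace from the caller:

#include <stdio.h>
#include <hdf5.h>

/* Create ndatasets empty datasets named "dataset 0", "dataset 1", ... */
static void
create_many_datasets(hid_t fid, hid_t filespace, int ndatasets)
{
    char dname[100];
    int  n;

    for (n = 0; n < ndatasets; n++) {
        hid_t did;

        snprintf(dname, sizeof(dname), "dataset %d", n); /* bounded, unlike sprintf */
        did = H5Dcreate2(fid, dname, H5T_NATIVE_DOUBLE, filespace,
                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if (did >= 0)
            H5Dclose(did);
    }
}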
- * - * JRM - 8/11/04 */ -void compact_dataset(void) +void +compact_dataset(void) { - int i, j, mpi_size, mpi_rank, size, err_num=0; - hid_t iof, plist, dcpl, dxpl, dataset, filespace; - hsize_t file_dims [DIM]; - double * outme; - double * inme; - char dname[]="dataset"; - herr_t ret; + int i, j, mpi_size, mpi_rank, size, err_num = 0; + hid_t iof, plist, dcpl, dxpl, dataset, filespace; + hsize_t file_dims[DIM]; + double *outme; + double *inme; + char dname[] = "dataset"; + herr_t ret; const char *filename; +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + hbool_t prop_value; +#endif size = get_size(); - for(i = 0; i < DIM; i++ ) - file_dims[i] = size; + for (i = 0; i < DIM; i++) + file_dims[i] = (hsize_t)size; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - outme = HDmalloc((size_t)(size * size * sizeof(double))); + outme = HDmalloc((size_t)((size_t)size * (size_t)size * sizeof(double))); VRFY((outme != NULL), "HDmalloc succeeded for outme"); - inme = HDmalloc((size_t)(size * size * sizeof(double))); + inme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); VRFY((outme != NULL), "HDmalloc succeeded for inme"); filename = GetTestParameters(); VRFY((mpi_size <= size), "mpi_size <= size"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); /* Define data space */ filespace = H5Screate_simple(DIM, file_dims, NULL); /* Create a compact dataset */ dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl>=0), "dataset creation property list succeeded"); + VRFY((dcpl >= 0), "dataset creation property list succeeded"); ret = H5Pset_layout(dcpl, H5D_COMPACT); VRFY((dcpl >= 0), "set property list for compact dataset"); ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY); @@ -278,16 +266,15 @@ void compact_dataset(void) VRFY((dxpl >= 0), ""); ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* Recalculate data to write. Each process writes the same data. */ - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - outme[(i * size) + j] =(i + j) * 1000; + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = (i + j) * 1000; ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme); VRFY((ret >= 0), "H5Dwrite succeeded"); @@ -300,7 +287,7 @@ void compact_dataset(void) /* Open the file and dataset, read and compare the data. 
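Two property-list idioms carry most of compact_dataset(): a creation list that forces the compact layout (which requires early allocation, since the raw data lives in the object header), and a transfer list that requests collective MPI-IO. A minimal sketch of both, assuming a parallel HDF5 build:

#include <hdf5.h>

/* Creation list for a compact dataset: raw data is stored in the object
 * header, so space must be allocated at creation time. */
static hid_t
make_compact_dcpl(void)
{
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_layout(dcpl, H5D_COMPACT);
    H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
    return dcpl;
}

/* Transfer list requesting collective I/O; every rank must then take part
 * in each H5Dwrite/H5Dread call that uses it. */
static hid_t
make_collective_dxpl(void)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    return dxpl;
}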
*/ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); + iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); VRFY((iof >= 0), "H5Fopen succeeded"); /* set up the collective transfer properties list */ @@ -308,24 +295,39 @@ void compact_dataset(void) VRFY((dxpl >= 0), ""); ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - dataset = H5Dopen2(iof, dname, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2 succeeded"); +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, + NULL, NULL, NULL, NULL, NULL); + VRFY((ret >= 0), "H5Pinsert2() succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme); VRFY((ret >= 0), "H5Dread succeeded"); +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "H5Pget succeeded"); + VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO), + "rank 0 Bcast optimization was performed for a compact dataset"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + /* Verify data value */ - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - if(inme[(i * size) + j] != outme[(i * size) + j]) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]); + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j])) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, + outme[(i * size) + j], inme[(i * size) + j]); H5Pclose(plist); H5Pclose(dxpl); @@ -338,25 +340,18 @@ void compact_dataset(void) /* * Example of using PHDF5 to create, write, and read dataset and attribute * of Null dataspace. - * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. 
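The H5D_XFER_COLL_RANK0_BCAST_* names used above come from the private H5Dprivate.h header and only exist in instrumented builds, but the underlying mechanism is the public temporary-property API: attach a property to one property-list instance, let the library flip it internally, then read it back. A generic sketch with a made-up property name ("my_probe" is illustrative, not an HDF5 name):

#include <hdf5.h>

static void
probe_property_demo(void)
{
    hid_t   dxpl  = H5Pcreate(H5P_DATASET_XFER);
    hbool_t value = 0, readback = 1;

    /* Insert a temporary property on this plist instance; all callbacks NULL */
    H5Pinsert2(dxpl, "my_probe", sizeof(hbool_t), &value,
               NULL, NULL, NULL, NULL, NULL, NULL);

    H5Pget(dxpl, "my_probe", &readback); /* readback is now 0 */

    H5Pclose(dxpl); /* the property disappears with the list */
}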
- * - * JRM - 8/24/04 */ -void null_dataset(void) +void +null_dataset(void) { - int mpi_size, mpi_rank; - hid_t iof, plist, dxpl, dataset, attr, sid; - unsigned uval=2; /* Buffer for writing to dataset */ - int val=1; /* Buffer for writing to attribute */ - int nelem; - char dname[]="dataset"; - char attr_name[]="attribute"; - herr_t ret; + int mpi_size, mpi_rank; + hid_t iof, plist, dxpl, dataset, attr, sid; + unsigned uval = 2; /* Buffer for writing to dataset */ + int val = 1; /* Buffer for writing to attribute */ + hssize_t nelem; + char dname[] = "dataset"; + char attr_name[] = "attribute"; + herr_t ret; const char *filename; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -365,7 +360,7 @@ void null_dataset(void) filename = GetTestParameters(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); /* Define data space */ sid = H5Screate(H5S_NULL); @@ -383,12 +378,11 @@ void null_dataset(void) VRFY((dxpl >= 0), ""); ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* Write "nothing" to the dataset(with type conversion) */ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval); VRFY((ret >= 0), "H5Dwrite succeeded"); @@ -409,7 +403,7 @@ void null_dataset(void) /* Open the file and dataset, read and compare the data. 
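A dataset with a null dataspace has no elements at all, so every transfer against it is a no-op that must leave user buffers untouched; that is the property null_dataset() verifies on both a dataset and an attribute. A serial sketch of the dataset half, given an open file:

#include <hdf5.h>

static void
null_space_demo(hid_t fid)
{
    hid_t    sid  = H5Screate(H5S_NULL); /* dataspace with no elements */
    hid_t    dset = H5Dcreate2(fid, "null_dset", H5T_NATIVE_UINT, sid,
                               H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    unsigned uval = 2;

    /* Zero elements move in either direction; uval must still be 2 after */
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval);
    H5Dread(dset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval);

    H5Dclose(dset);
    H5Sclose(sid);
}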
*/ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); + iof = H5Fopen(filename, H5F_ACC_RDONLY, plist); VRFY((iof >= 0), "H5Fopen succeeded"); /* set up the collective transfer properties list */ @@ -417,27 +411,27 @@ void null_dataset(void) VRFY((dxpl >= 0), ""); ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - dataset = H5Dopen2(iof, dname, H5P_DEFAULT); VRFY((dataset >= 0), "H5Dopen2 succeeded"); /* Try reading from the dataset(make certain our buffer is unmodified) */ ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval); - VRFY((ret>=0), "H5Dread"); - VRFY((uval==2), "H5Dread"); + VRFY((ret >= 0), "H5Dread"); + VRFY((uval == 2), "H5Dread"); /* Open the attribute for the dataset */ attr = H5Aopen(dataset, attr_name, H5P_DEFAULT); VRFY((attr >= 0), "H5Aopen"); - /* Try reading from the attribute(make certain our buffer is unmodified) */ ret = H5Aread(attr, H5T_NATIVE_INT, &val); - VRFY((ret>=0), "H5Aread"); - VRFY((val==1), "H5Aread"); + /* Try reading from the attribute(make certain our buffer is unmodified) */ ret = + H5Aread(attr, H5T_NATIVE_INT, &val); + VRFY((ret >= 0), "H5Aread"); + VRFY((val == 1), "H5Aread"); H5Pclose(plist); H5Pclose(dxpl); @@ -450,26 +444,19 @@ void null_dataset(void) * Actual data is _not_ written to these datasets. Dataspaces are exact * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over * the boundary of interest. - * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. 
- * - * JRM - 8/11/04 */ -void big_dataset(void) +void +big_dataset(void) { - int mpi_size, mpi_rank; /* MPI info */ - hid_t iof, /* File ID */ - fapl, /* File access property list ID */ - dataset, /* Dataset ID */ - filespace; /* Dataset's dataspace ID */ - hsize_t file_dims [4]; /* Dimensions of dataspace */ - char dname[]="dataset"; /* Name of dataset */ - MPI_Offset file_size; /* Size of file on disk */ - herr_t ret; /* Generic return value */ + int mpi_size, mpi_rank; /* MPI info */ + hid_t iof, /* File ID */ + fapl, /* File access property list ID */ + dataset, /* Dataset ID */ + filespace; /* Dataset's dataspace ID */ + hsize_t file_dims[4]; /* Dimensions of dataspace */ + char dname[] = "dataset"; /* Name of dataset */ + MPI_Offset file_size; /* Size of file on disk */ + herr_t ret; /* Generic return value */ const char *filename; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -490,11 +477,11 @@ void big_dataset(void) VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 2GB dataspace */ - file_dims[0]= 2; - file_dims[1]= 1024; - file_dims[2]= 1024; - file_dims[3]= 1024; - filespace = H5Screate_simple(4, file_dims, NULL); + file_dims[0] = 2; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -510,7 +497,7 @@ void big_dataset(void) /* Check that file of the correct size was created */ file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 2147485792ULL), "File is correct size(~2GB)"); + VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)"); /* * Create >4GB HDF5 file @@ -519,11 +506,11 @@ void big_dataset(void) VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 4GB dataspace */ - file_dims[0]= 4; - file_dims[1]= 1024; - file_dims[2]= 1024; - file_dims[3]= 1024; - filespace = H5Screate_simple(4, file_dims, NULL); + file_dims[0] = 4; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -539,7 +526,7 @@ void big_dataset(void) /* Check that file of the correct size was created */ file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 4294969440ULL), "File is correct size(~4GB)"); + VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)"); /* * Create >8GB HDF5 file @@ -548,11 +535,11 @@ void big_dataset(void) VRFY((iof >= 0), "H5Fcreate succeeded"); /* Define dataspace for 8GB dataspace */ - file_dims[0]= 8; - file_dims[1]= 1024; - file_dims[2]= 1024; - file_dims[3]= 1024; - filespace = H5Screate_simple(4, file_dims, NULL); + file_dims[0] = 8; + file_dims[1] = 1024; + file_dims[2] = 1024; + file_dims[3] = 1024; + filespace = H5Screate_simple(4, file_dims, NULL); VRFY((filespace >= 0), "H5Screate_simple succeeded"); dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -568,7 +555,7 @@ void big_dataset(void) /* Check that file of the correct size was created */ file_size = h5_get_file_size(filename, fapl); - VRFY((file_size == 8589936736ULL), "File is correct size(~8GB)"); + VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)"); /* Close fapl */ ret = H5Pclose(fapl); @@ -578,37 +565,31 @@ void big_dataset(void) /* 
Example of using PHDF5 to read a partial written dataset. The dataset does * not have actual data written to the entire raw data area and relies on the * default fill value of zeros to work correctly. - * - * Changes: Removed the assert that mpi_size <= the SIZE #define. - * As best I can tell, this assert isn't needed here, - * and in any case, the SIZE #define is being removed - * in an update of the functions in this file to run - * with an arbitrary number of processes. - * - * Also added code to free dynamically allocated buffers. - * - * JRM - 8/11/04 */ -void dataset_fillvalue(void) +void +dataset_fillvalue(void) { - int mpi_size, mpi_rank; /* MPI info */ - int err_num; /* Number of errors */ - hid_t iof, /* File ID */ - fapl, /* File access property list ID */ - dxpl, /* Data transfer property list ID */ - dataset, /* Dataset ID */ - memspace, /* Memory dataspace ID */ - filespace; /* Dataset's dataspace ID */ - char dname[]="dataset"; /* Name of dataset */ + int mpi_size, mpi_rank; /* MPI info */ + int err_num; /* Number of errors */ + hid_t iof, /* File ID */ + fapl, /* File access property list ID */ + dxpl, /* Data transfer property list ID */ + dataset, /* Dataset ID */ + memspace, /* Memory dataspace ID */ + filespace; /* Dataset's dataspace ID */ + char dname[] = "dataset"; /* Name of dataset */ hsize_t dset_dims[4] = {0, 6, 7, 8}; hsize_t req_start[4] = {0, 0, 0, 0}; hsize_t req_count[4] = {1, 6, 7, 8}; - hsize_t dset_size; /* Dataset size */ - int *rdata, *wdata; /* Buffers for data to read and write */ - int *twdata, *trdata; /* Temporary pointer into buffer */ - int acc, i, j, k, l; /* Local index variables */ - herr_t ret; /* Generic return value */ + hsize_t dset_size; /* Dataset size */ + int *rdata, *wdata; /* Buffers for data to read and write */ + int *twdata, *trdata; /* Temporary pointer into buffer */ + int acc, i, ii, j, k, l; /* Local index variables */ + herr_t ret; /* Generic return value */ const char *filename; +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + hbool_t prop_value; +#endif MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -617,13 +598,13 @@ void dataset_fillvalue(void) /* Set the dataset dimension to be one row more than number of processes */ /* and calculate the actual dataset size. */ - dset_dims[0]=mpi_size+1; - dset_size=dset_dims[0]*dset_dims[1]*dset_dims[2]*dset_dims[3]; + dset_dims[0] = (hsize_t)(mpi_size + 1); + dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3]; /* Allocate space for the buffers */ - rdata=HDmalloc((size_t)(dset_size*sizeof(int))); + rdata = HDmalloc((size_t)(dset_size * sizeof(int))); VRFY((rdata != NULL), "HDcalloc succeeded for read buffer"); - wdata=HDmalloc((size_t)(dset_size*sizeof(int))); + wdata = HDmalloc((size_t)(dset_size * sizeof(int))); VRFY((wdata != NULL), "HDmalloc succeeded for write buffer"); fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); @@ -647,27 +628,61 @@ void dataset_fillvalue(void) /* * Read dataset before any data is written. 
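Reading a chunked dataset before anything has been written to it returns the fill value, which defaults to 0 for integer data. Pre-poisoning the read buffer with a nonzero sentinel, as dataset_fillvalue() does, makes a silent non-read visible. An isolated helper illustrating the check; the dataset handle and element count are assumed to come from the caller:

#include <string.h>
#include <hdf5.h>

/* Returns 0 if every element of an unwritten dataset reads back as the
 * default fill value 0, -1 otherwise. */
static int
read_unwritten(hid_t dset, int *buf, size_t nelem)
{
    size_t i;

    memset(buf, 2, nelem * sizeof(int)); /* sentinel: 0x02020202 per int */
    if (H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
        return -1;

    for (i = 0; i < nelem; i++)
        if (buf[i] != 0)
            return -1;
    return 0;
}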
*/ - /* set entire read buffer with the constant 2 */ - HDmemset(rdata,2,(size_t)(dset_size*sizeof(int))); - /* Independently read the entire dataset back */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - VRFY((ret >= 0), "H5Dread succeeded"); - /* Verify all data read are the fill value 0 */ - trdata = rdata; - err_num = 0; - for(i = 0; i < (int)dset_dims[0]; i++) - for(j = 0; j < (int)dset_dims[1]; j++) - for(k = 0; k < (int)dset_dims[2]; k++) - for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) - if(*trdata != 0) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata); - if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if(err_num){ - printf("%d errors found in check_value\n", err_num); - nerrors++; + /* Create DXPL for I/O */ + dxpl = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl >= 0), "H5Pcreate succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL, + NULL, NULL, NULL, NULL, NULL); + VRFY((ret >= 0), "testing property list inserted succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + for (ii = 0; ii < 2; ii++) { + + if (ii == 0) + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + else + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* set entire read buffer with the constant 2 */ + HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int))); + + /* Read the entire dataset back */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); + VRFY((ret >= 0), "H5Dread succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "testing property list get succeeded"); + if (ii == 0) + VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast"); + else + VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Verify all data read are the fill value 0 */ + trdata = rdata; + err_num = 0; + for (i = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++, trdata++) + if (*trdata != 0) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf( + "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", + mpi_rank, i, j, k, l, *trdata); + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("Rank %d: [more errors ...]\n", mpi_rank); + if (err_num) { + HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num); + nerrors++; + } } /* Barrier to ensure all processes have completed the above test. */ @@ -677,30 +692,25 @@ void dataset_fillvalue(void) * Each process writes 1 row of data. Thus last row is not written. 
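The partial write that follows hinges on one hyperslab selection: rank r claims row r of the 4-D dataset, so with dset_dims[0] set to mpi_size + 1 the last row is never touched and keeps its fill value. The selection in isolation, using the {1, 6, 7, 8} block dimensions from the test above:

#include <hdf5.h>

/* Select the single "row" of the file dataspace owned by this rank */
static herr_t
select_my_row(hid_t filespace, int mpi_rank)
{
    hsize_t start[4] = {(hsize_t)mpi_rank, 0, 0, 0};
    hsize_t count[4] = {1, 6, 7, 8};

    return H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
}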
*/ /* Create hyperslabs in memory and file dataspaces */ - req_start[0]=mpi_rank; - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); + req_start[0] = (hsize_t)mpi_rank; + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL); VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace"); - /* Create DXPL for collective I/O */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); } - /* Fill write buffer with some values */ - twdata=wdata; - for(i=0, acc=0; i<(int)dset_dims[0]; i++) - for(j=0; j<(int)dset_dims[1]; j++) - for(k=0; k<(int)dset_dims[2]; k++) - for(l=0; l<(int)dset_dims[3]; l++) + twdata = wdata; + for (i = 0, acc = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++) *twdata++ = acc++; /* Collectively write a hyperslab of data to the dataset */ @@ -713,35 +723,64 @@ void dataset_fillvalue(void) /* * Read dataset after partial write. */ - /* set entire read buffer with the constant 2 */ - HDmemset(rdata,2,(size_t)(dset_size*sizeof(int))); - /* Independently read the entire dataset back */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata); - VRFY((ret >= 0), "H5Dread succeeded"); - /* Verify correct data read */ - twdata=wdata; - trdata=rdata; - err_num=0; - for(i=0; i<(int)dset_dims[0]; i++) - for(j=0; j<(int)dset_dims[1]; j++) - for(k=0; k<(int)dset_dims[2]; k++) - for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++) - if(i<mpi_size) { - if(*twdata != *trdata ) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata); - } /* end if */ - else { - if(*trdata != 0) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata); - } /* end else */ - if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if(err_num){ - printf("%d errors found in check_value\n", err_num); - nerrors++; +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF; + ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), " H5Pset succeeded"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + for (ii = 0; ii < 2; ii++) { + + if (ii == 0) + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); + else + ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* set entire read buffer with the constant 2 */ + HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int))); + + /* Read the entire dataset back */ + ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata); + VRFY((ret >= 0), "H5Dread succeeded"); + +#ifdef H5_HAVE_INSTRUMENTED_LIBRARY + prop_value = 
FALSE; + ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value); + VRFY((ret >= 0), "testing property list get succeeded"); + if (ii == 0) + VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast"); + else + VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast"); +#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */ + + /* Verify correct data read */ + twdata = wdata; + trdata = rdata; + err_num = 0; + for (i = 0; i < (int)dset_dims[0]; i++) + for (j = 0; j < (int)dset_dims[1]; j++) + for (k = 0; k < (int)dset_dims[2]; k++) + for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++) + if (i < mpi_size) { + if (*twdata != *trdata) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", + i, j, k, l, *twdata, *trdata); + } /* end if */ + else { + if (*trdata != 0) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", + i, j, k, l, *trdata); + } /* end else */ + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (err_num) { + HDprintf("%d errors found in check_value\n", err_num); + nerrors++; + } } /* Close all file objects */ @@ -769,47 +808,51 @@ void dataset_fillvalue(void) HDfree(wdata); } +/* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ +void +collective_group_write_independent_group_read(void) +{ + collective_group_write(); + independent_group_read(); +} + /* Write multiple groups with a chunked dataset in each group collectively. * These groups and datasets are for testing independent read later. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/16/04 */ -void collective_group_write(void) +void +collective_group_write(void) { - int mpi_rank, mpi_size, size; - int i, j, m; - char gname[64], dname[32]; - hid_t fid, gid, did, plist, dcpl, memspace, filespace; - DATATYPE * outme = NULL; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; - hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ - herr_t ret1, ret2; + int mpi_rank, mpi_size, size; + int i, j, m; + char gname[64], dname[32]; + hid_t fid, gid, did, plist, dcpl, memspace, filespace; + DATATYPE *outme = NULL; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; + hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */ + herr_t ret1, ret2; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; - pt = GetTestParameters(); + pt = GetTestParameters(); filename = pt->name; - ngroups = pt->count; + ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); - chunk_size[0] =(hsize_t)(size / 2); - chunk_size[1] =(hsize_t)(size / 2); + chunk_size[0] = (hsize_t)(size / 2); + chunk_size[1] = (hsize_t)(size / 2); - outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE))); + outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); VRFY((outme != NULL), "HDmalloc succeeded for outme"); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + VRFY((fid >= 0), "H5Fcreate"); H5Pclose(plist); /* decide the hyperslab according to process number. 
*/ @@ -818,54 +861,57 @@ void collective_group_write(void) /* select hyperslab in memory and file spaces. These two operations are * identical since the datasets are the same. */ memspace = H5Screate_simple(DIM, file_dims, NULL); - ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, - chunk_dims, count, chunk_dims); - filespace = H5Screate_simple(DIM, file_dims, NULL); - ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, - chunk_dims, count, chunk_dims); - VRFY((memspace>=0), "memspace"); - VRFY((filespace>=0), "filespace"); - VRFY((ret1>=0), "mgroup memspace selection"); - VRFY((ret2>=0), "mgroup filespace selection"); + ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + filespace = H5Screate_simple(DIM, file_dims, NULL); + ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((memspace >= 0), "memspace"); + VRFY((filespace >= 0), "filespace"); + VRFY((ret1 == 0), "mgroup memspace selection"); + VRFY((ret2 == 0), "mgroup filespace selection"); dcpl = H5Pcreate(H5P_DATASET_CREATE); ret1 = H5Pset_chunk(dcpl, 2, chunk_size); - VRFY((dcpl>=0), "dataset creation property"); - VRFY((ret1>=0), "set chunk for dataset creation property"); + VRFY((dcpl >= 0), "dataset creation property"); + VRFY((ret1 == 0), "set chunk for dataset creation property"); /* creates ngroups groups under the root group, writes chunked * datasets in parallel. */ - for(m = 0; m < ngroups; m++) { - sprintf(gname, "group%d", m); + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((gid > 0), gname); - sprintf(dname, "dataset%d", m); + HDsnprintf(dname, sizeof(dname), "dataset%d", m); did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT); VRFY((did > 0), dname); - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - outme[(i * size) + j] =(i + j) * 1000 + mpi_rank; + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = (i + j) * 1000 + mpi_rank; - H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, - outme); + ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + VRFY((ret1 == 0), "H5Dwrite"); - H5Dclose(did); - H5Gclose(gid); + ret1 = H5Dclose(did); + VRFY((ret1 == 0), "H5Dclose"); + + ret1 = H5Gclose(gid); + VRFY((ret1 == 0), "H5Gclose"); #ifdef BARRIER_CHECKS - if(!((m+1) % 10)) { - printf("created %d groups\n", m+1); + if (!((m + 1) % 10)) { + HDprintf("created %d groups\n", m + 1); MPI_Barrier(MPI_COMM_WORLD); - } + } #endif /* BARRIER_CHECKS */ } H5Pclose(dcpl); H5Sclose(filespace); H5Sclose(memspace); - H5Fclose(fid); + + ret1 = H5Fclose(fid); + VRFY((ret1 == 0), "H5Fclose"); HDfree(outme); } @@ -873,91 +919,89 @@ void collective_group_write(void) /* Let two sets of processes open and read different groups and chunked * datasets independently. 
*/ -void independent_group_read(void) +void +independent_group_read(void) { - int mpi_rank, m; - hid_t plist, fid; + int mpi_rank, m; + hid_t plist, fid; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; + herr_t ret; - pt = GetTestParameters(); + pt = GetTestParameters(); filename = pt->name; - ngroups = pt->count; + ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + H5Pset_all_coll_metadata_ops(plist, FALSE); + fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); + VRFY((fid > 0), "H5Fopen"); H5Pclose(plist); /* open groups and read datasets. Odd number processes read even number * groups from the end; even number processes read odd number groups * from the beginning. */ - if(mpi_rank%2==0) { - for(m=ngroups-1; m==0; m-=2) + if (mpi_rank % 2 == 0) { + for (m = ngroups - 1; m == 0; m -= 2) group_dataset_read(fid, mpi_rank, m); - } else { - for(m=0; m<ngroups; m+=2) + } + else { + for (m = 0; m < ngroups; m += 2) group_dataset_read(fid, mpi_rank, m); } - H5Fclose(fid); + ret = H5Fclose(fid); + VRFY((ret == 0), "H5Fclose"); } /* Open and read datasets and compare data - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * Also added code to verify the results of dynamic memory - * allocations, and to free dynamically allocated memeory - * when we are done with it. - * - * JRM - 8/16/04 */ static void group_dataset_read(hid_t fid, int mpi_rank, int m) { - int ret, i, j, size; - char gname[64], dname[32]; - hid_t gid, did; + int ret, i, j, size; + char gname[64], dname[32]; + hid_t gid, did; DATATYPE *outdata = NULL; - DATATYPE *indata = NULL; + DATATYPE *indata = NULL; size = get_size(); - indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE))); + indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); VRFY((indata != NULL), "HDmalloc succeeded for indata"); - outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE))); + outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); /* open every group under root group. */ - sprintf(gname, "group%d", m); + HDsnprintf(gname, sizeof(gname), "group%d", m); gid = H5Gopen2(fid, gname, H5P_DEFAULT); VRFY((gid > 0), gname); /* check the data. */ - sprintf(dname, "dataset%d", m); + HDsnprintf(dname, sizeof(dname), "dataset%d", m); did = H5Dopen2(gid, dname, H5P_DEFAULT); - VRFY((did>0), dname); + VRFY((did > 0), dname); H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata); /* this is the original value */ - for(i=0; i<size; i++) - for(j=0; j<size; j++) { - outdata[(i * size) + j] =(i+j)*1000 + mpi_rank; - } + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank; /* compare the original value(outdata) to the value in file(indata).*/ ret = check_value(indata, outdata, size); - VRFY((ret==0), "check the data"); + VRFY((ret == 0), "check the data"); - H5Dclose(did); - H5Gclose(gid); + ret = H5Dclose(did); + VRFY((ret == 0), "H5Dclose"); + ret = H5Gclose(gid); + VRFY((ret == 0), "H5Gclose"); HDfree(indata); HDfree(outdata); @@ -989,28 +1033,24 @@ group_dataset_read(hid_t fid, int mpi_rank, int m) * + means the group has attribute(s). * ' means the datasets in the groups have attribute(s). 
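The H5Pset_all_coll_metadata_ops(plist, FALSE) call above is what makes the independent reads legal: with collective metadata operations disabled, odd and even ranks may open different groups without deadlocking. A minimal sketch of the file-open side, assuming a parallel HDF5 build:

#include <hdf5.h>
#include <mpi.h>

/* Open a file through MPI-IO but allow each rank to open objects on its own */
static hid_t
open_for_independent_reads(const char *filename)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid;

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_all_coll_metadata_ops(fapl, 0); /* FALSE: opens need not be collective */

    fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
    H5Pclose(fapl);
    return fid;
}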
* - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/16/04 */ -void multiple_group_write(void) +void +multiple_group_write(void) { - int mpi_rank, mpi_size, size; - int m; - char gname[64]; - hid_t fid, gid, plist, memspace, filespace; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; - herr_t ret; + int mpi_rank, mpi_size, size; + int m; + char gname[64]; + hid_t fid, gid, plist, memspace, filespace; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; + herr_t ret; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; - pt = GetTestParameters(); + pt = GetTestParameters(); filename = pt->name; - ngroups = pt->count; + ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -1018,7 +1058,7 @@ void multiple_group_write(void) size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist); H5Pclose(plist); /* decide the hyperslab according to process number. */ @@ -1026,38 +1066,36 @@ void multiple_group_write(void) /* select hyperslab in memory and file spaces. These two operations are * identical since the datasets are the same. */ - memspace = H5Screate_simple(DIM, file_dims, NULL); - VRFY((memspace>=0), "memspace"); - ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, - chunk_dims, count, chunk_dims); - VRFY((ret>=0), "mgroup memspace selection"); + memspace = H5Screate_simple(DIM, file_dims, NULL); + VRFY((memspace >= 0), "memspace"); + ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mgroup memspace selection"); - filespace = H5Screate_simple(DIM, file_dims, NULL); - VRFY((filespace>=0), "filespace"); - ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, - chunk_dims, count, chunk_dims); - VRFY((ret>=0), "mgroup filespace selection"); + filespace = H5Screate_simple(DIM, file_dims, NULL); + VRFY((filespace >= 0), "filespace"); + ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); + VRFY((ret >= 0), "mgroup filespace selection"); /* creates ngroups groups under the root group, writes datasets in * parallel. */ - for(m = 0; m < ngroups; m++) { - sprintf(gname, "group%d", m); + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); VRFY((gid > 0), gname); /* create attribute for these groups. 
*/ - write_attribute(gid, is_group, m); + write_attribute(gid, is_group, m); - if(m != 0) - write_dataset(memspace, filespace, gid); + if (m != 0) + write_dataset(memspace, filespace, gid); H5Gclose(gid); #ifdef BARRIER_CHECKS - if(!((m+1) % 10)) { - printf("created %d groups\n", m+1); + if (!((m + 1) % 10)) { + HDprintf("created %d groups\n", m + 1); MPI_Barrier(MPI_COMM_WORLD); - } + } #endif /* BARRIER_CHECKS */ } @@ -1065,58 +1103,52 @@ void multiple_group_write(void) gid = H5Gopen2(fid, "group0", H5P_DEFAULT); create_group_recursive(memspace, filespace, gid, 0); ret = H5Gclose(gid); - VRFY((ret>=0), "H5Gclose"); + VRFY((ret >= 0), "H5Gclose"); ret = H5Sclose(filespace); - VRFY((ret>=0), "H5Sclose"); + VRFY((ret >= 0), "H5Sclose"); ret = H5Sclose(memspace); - VRFY((ret>=0), "H5Sclose"); + VRFY((ret >= 0), "H5Sclose"); ret = H5Fclose(fid); - VRFY((ret>=0), "H5Fclose"); + VRFY((ret >= 0), "H5Fclose"); } /* * In a group, creates NDATASETS datasets. Each process writes a hyperslab * of a data array to the file. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/16/04 */ static void write_dataset(hid_t memspace, hid_t filespace, hid_t gid) { - int i, j, n, size; - int mpi_rank, mpi_size; - char dname[32]; - DATATYPE * outme = NULL; - hid_t did; + int i, j, n, size; + int mpi_rank, mpi_size; + char dname[32]; + DATATYPE *outme = NULL; + hid_t did; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); - outme = HDmalloc((size_t)(size * size * sizeof(double))); + outme = HDmalloc((size_t)size * (size_t)size * sizeof(double)); VRFY((outme != NULL), "HDmalloc succeeded for outme"); - for(n = 0; n < NDATASET; n++) { - sprintf(dname, "dataset%d", n); - did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((did > 0), dname); + for (n = 0; n < NDATASET; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); + did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((did > 0), dname); - for(i = 0; i < size; i++) - for(j = 0; j < size; j++) - outme[(i * size) + j] = n * 1000 + mpi_rank; + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) + outme[(i * size) + j] = n * 1000 + mpi_rank; - H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); + H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme); - /* create attribute for these datasets.*/ - write_attribute(did, is_dset, n); + /* create attribute for these datasets.*/ + write_attribute(did, is_dset, n); - H5Dclose(did); + H5Dclose(did); } HDfree(outme); } @@ -1128,57 +1160,52 @@ write_dataset(hid_t memspace, hid_t filespace, hid_t gid) static void create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter) { - hid_t child_gid; - int mpi_rank; - char gname[64]; + hid_t child_gid; + int mpi_rank; + char gname[64]; - MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); #ifdef BARRIER_CHECKS - if(!((counter+1) % 10)) { - printf("created %dth child groups\n", counter+1); + if (!((counter + 1) % 10)) { + HDprintf("created %dth child groups\n", counter + 1); MPI_Barrier(MPI_COMM_WORLD); - } + } #endif /* BARRIER_CHECKS */ - sprintf(gname, "%dth_child_group", counter+1); - child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((child_gid > 0), 
gname); + HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); + child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((child_gid > 0), gname); - /* write datasets in parallel. */ - write_dataset(memspace, filespace, gid); + /* write datasets in parallel. */ + write_dataset(memspace, filespace, gid); - if(counter < GROUP_DEPTH ) - create_group_recursive(memspace, filespace, child_gid, counter+1); + if (counter < GROUP_DEPTH) + create_group_recursive(memspace, filespace, child_gid, counter + 1); - H5Gclose(child_gid); + H5Gclose(child_gid); } /* * This function is to verify the data from multiple group testing. It opens * every dataset in every group and check their correctness. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ -void multiple_group_read(void) +void +multiple_group_read(void) { - int mpi_rank, mpi_size, error_num, size; - int m; - char gname[64]; - hid_t plist, fid, gid, memspace, filespace; - hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; + int mpi_rank, mpi_size, error_num, size; + int m; + char gname[64]; + hid_t plist, fid, gid, memspace, filespace; + hsize_t chunk_origin[DIM]; + hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM]; const H5Ptest_param_t *pt; - char *filename; - int ngroups; + char *filename; + int ngroups; - pt = GetTestParameters(); + pt = GetTestParameters(); filename = pt->name; - ngroups = pt->count; + ngroups = pt->count; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); @@ -1186,106 +1213,96 @@ void multiple_group_read(void) size = get_size(); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); - fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); + fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); H5Pclose(plist); /* decide hyperslab for each process */ get_slab(chunk_origin, chunk_dims, count, file_dims, size); /* select hyperslab for memory and file space */ - memspace = H5Screate_simple(DIM, file_dims, NULL); - H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, - count, chunk_dims); + memspace = H5Screate_simple(DIM, file_dims, NULL); + H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); filespace = H5Screate_simple(DIM, file_dims, NULL); - H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, - count, chunk_dims); + H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims); /* open every group under root group. */ - for(m=0; m<ngroups; m++) { - sprintf(gname, "group%d", m); + for (m = 0; m < ngroups; m++) { + HDsnprintf(gname, sizeof(gname), "group%d", m); gid = H5Gopen2(fid, gname, H5P_DEFAULT); VRFY((gid > 0), gname); /* check the data. */ - if(m != 0) - if((error_num = read_dataset(memspace, filespace, gid))>0) - nerrors += error_num; + if (m != 0) + if ((error_num = read_dataset(memspace, filespace, gid)) > 0) + nerrors += error_num; /* check attribute.*/ error_num = 0; - if((error_num = read_attribute(gid, is_group, m))>0 ) - nerrors += error_num; + if ((error_num = read_attribute(gid, is_group, m)) > 0) + nerrors += error_num; H5Gclose(gid); #ifdef BARRIER_CHECKS - if(!((m+1)%10)) + if (!((m + 1) % 10)) MPI_Barrier(MPI_COMM_WORLD); #endif /* BARRIER_CHECKS */ } /* open all the groups in vertical direction. 
*/ gid = H5Gopen2(fid, "group0", H5P_DEFAULT); - VRFY((gid>0), "group0"); + VRFY((gid > 0), "group0"); recursive_read_group(memspace, filespace, gid, 0); H5Gclose(gid); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(fid); - } /* * This function opens all the datasets in a certain, checks the data using * dataset_vrfy function. - * - * Changes: Updated function to use a dynamically calculated size, - * instead of the old SIZE #define. This should allow it - * to function with an arbitrary number of processors. - * - * JRM - 8/11/04 */ static int read_dataset(hid_t memspace, hid_t filespace, hid_t gid) { - int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0; - char dname[32]; + int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0; + char dname[32]; DATATYPE *outdata = NULL, *indata = NULL; - hid_t did; + hid_t did; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); size = get_size(); - indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE))); + indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); VRFY((indata != NULL), "HDmalloc succeeded for indata"); - outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE))); + outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE)); VRFY((outdata != NULL), "HDmalloc succeeded for outdata"); - for(n=0; n<NDATASET; n++) { - sprintf(dname, "dataset%d", n); + for (n = 0; n < NDATASET; n++) { + HDsnprintf(dname, sizeof(dname), "dataset%d", n); did = H5Dopen2(gid, dname, H5P_DEFAULT); - VRFY((did>0), dname); + VRFY((did > 0), dname); - H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, - indata); + H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata); /* this is the original value */ - for(i=0; i<size; i++) - for(j=0; j<size; j++) { - *outdata = n*1000 + mpi_rank; - outdata++; - } + for (i = 0; i < size; i++) + for (j = 0; j < size; j++) { + *outdata = n * 1000 + mpi_rank; + outdata++; + } outdata -= size * size; /* compare the original value(outdata) to the value in file(indata).*/ vrfy_errors = check_value(indata, outdata, size); /* check attribute.*/ - if((attr_errors = read_attribute(did, is_dset, n))>0 ) + if ((attr_errors = read_attribute(did, is_dset, n)) > 0) vrfy_errors += attr_errors; H5Dclose(did); @@ -1305,23 +1322,23 @@ static void recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter) { hid_t child_gid; - int mpi_rank, err_num=0; + int mpi_rank, err_num = 0; char gname[64]; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); #ifdef BARRIER_CHECKS - if((counter+1) % 10) + if ((counter + 1) % 10) MPI_Barrier(MPI_COMM_WORLD); #endif /* BARRIER_CHECKS */ - if((err_num = read_dataset(memspace, filespace, gid)) ) + if ((err_num = read_dataset(memspace, filespace, gid))) nerrors += err_num; - if(counter < GROUP_DEPTH ) { - sprintf(gname, "%dth_child_group", counter+1); + if (counter < GROUP_DEPTH) { + HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1); child_gid = H5Gopen2(gid, gname, H5P_DEFAULT); - VRFY((child_gid>0), gname); - recursive_read_group(memspace, filespace, child_gid, counter+1); + VRFY((child_gid > 0), gname); + recursive_read_group(memspace, filespace, child_gid, counter + 1); H5Gclose(child_gid); } } @@ -1333,23 +1350,23 @@ static void write_attribute(hid_t obj_id, int this_type, int num) { hid_t sid, aid; - hsize_t dspace_dims[1]={8}; - int i, mpi_rank, attr_data[8], dspace_rank=1; + hsize_t dspace_dims[1] = {8}; + int i, mpi_rank, 
attr_data[8], dspace_rank = 1; char attr_name[32]; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if(this_type == is_group) { - sprintf(attr_name, "Group Attribute %d", num); + if (this_type == is_group) { + HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); sid = H5Screate(H5S_SCALAR); aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); - H5Awrite(aid, H5T_NATIVE_INT, &num); + H5Awrite(aid, H5T_NATIVE_INT, &num); H5Aclose(aid); H5Sclose(sid); } /* end if */ - else if(this_type == is_dset) { - sprintf(attr_name, "Dataset Attribute %d", num); - for(i=0; i<8; i++) + else if (this_type == is_dset) { + HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); + for (i = 0; i < 8; i++) attr_data[i] = i; sid = H5Screate_simple(dspace_rank, dspace_dims, NULL); aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT); @@ -1357,38 +1374,33 @@ write_attribute(hid_t obj_id, int this_type, int num) H5Aclose(aid); H5Sclose(sid); } /* end else-if */ - } /* Read and verify attribute for group or dataset. */ static int read_attribute(hid_t obj_id, int this_type, int num) { - hid_t aid; - hsize_t group_block[2]={1,1}, dset_block[2]={1, 8}; - int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0; - char attr_name[32]; + hid_t aid; + hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8}; + int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0; + char attr_name[32]; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if(this_type == is_group) { - sprintf(attr_name, "Group Attribute %d", num); + if (this_type == is_group) { + HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num); aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); - if(MAINPROCESS) { - H5Aread(aid, H5T_NATIVE_INT, &in_num); - vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num); - } + H5Aread(aid, H5T_NATIVE_INT, &in_num); + vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num); H5Aclose(aid); } - else if(this_type == is_dset) { - sprintf(attr_name, "Dataset Attribute %d", num); - for(i=0; i<8; i++) + else if (this_type == is_dset) { + HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num); + for (i = 0; i < 8; i++) out_data[i] = i; aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT); - if(MAINPROCESS) { - H5Aread(aid, H5T_NATIVE_INT, in_data); - vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data); - } + H5Aread(aid, H5T_NATIVE_INT, in_data); + vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data); H5Aclose(aid); } @@ -1397,70 +1409,59 @@ read_attribute(hid_t obj_id, int this_type, int num) /* This functions compares the original data with the read-in data for its * hyperslab part only by process ID. - * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. 
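
Note the behavioral change folded into the cleanup of read_attribute() above: the old MAINPROCESS guard is gone, so every rank now performs the H5Aread() and verification rather than rank 0 alone. Reduced to essentials, the group-attribute round trip implemented by write_attribute()/read_attribute() looks like the sketch below (scalar_attr_roundtrip is a hypothetical condensation, not test code):

#include "hdf5.h"

/* Sketch: scalar int attribute round trip, per the is_group branches
 * of write_attribute()/read_attribute() above. Returns 0 on match. */
static int
scalar_attr_roundtrip(hid_t obj_id, const char *name, int expected)
{
    int   value = -1;
    hid_t sid   = H5Screate(H5S_SCALAR);
    hid_t aid   = H5Acreate2(obj_id, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);

    H5Awrite(aid, H5T_NATIVE_INT, &expected);
    H5Aclose(aid);
    H5Sclose(sid);

    aid = H5Aopen(obj_id, name, H5P_DEFAULT);
    H5Aread(aid, H5T_NATIVE_INT, &value); /* now executed on every rank */
    H5Aclose(aid);

    return (value == expected) ? 0 : 1;
}
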
- * - * JRM - 8/16/04 */ static int check_value(DATATYPE *indata, DATATYPE *outdata, int size) { - int mpi_rank, mpi_size, err_num=0; + int mpi_rank, mpi_size, err_num = 0; hsize_t i, j; hsize_t chunk_origin[DIM]; - hsize_t chunk_dims[DIM], count[DIM]; + hsize_t chunk_dims[DIM], count[DIM]; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); get_slab(chunk_origin, chunk_dims, count, NULL, size); - indata += chunk_origin[0]*size; - outdata += chunk_origin[0]*size; - for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++) - for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) { - if(*indata != *outdata ) - if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED) - printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata); - } - if(err_num > MAX_ERR_REPORT && !VERBOSE_MED) - printf("[more errors ...]\n"); - if(err_num) - printf("%d errors found in check_value\n", err_num); + indata += chunk_origin[0] * (hsize_t)size; + outdata += chunk_origin[0] * (hsize_t)size; + for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++) + for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) { + if (*indata != *outdata) + if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED) + HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n", + (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata, + *indata); + } + if (err_num > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("[more errors ...]\n"); + if (err_num) + HDprintf("%d errors found in check_value\n", err_num); return err_num; } /* Decide the portion of data chunk in dataset by process ID. - * - * Changes: Modified function to use a passed in size parameter - * instead of the old SIZE #define. This should let us - * run with an arbitrary number of processes. - * - * JRM - 8/11/04 */ static void -get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], - hsize_t file_dims[], int size) +get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size) { int mpi_rank, mpi_size; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - if(chunk_origin != NULL) { - chunk_origin[0] = mpi_rank *(size/mpi_size); + if (chunk_origin != NULL) { + chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size); chunk_origin[1] = 0; } - if(chunk_dims != NULL) { - chunk_dims[0] = size/mpi_size; - chunk_dims[1] = size; + if (chunk_dims != NULL) { + chunk_dims[0] = (hsize_t)(size / mpi_size); + chunk_dims[1] = (hsize_t)size; } - if(file_dims != NULL) - file_dims[0] = file_dims[1] = size; - if(count != NULL) + if (file_dims != NULL) + file_dims[0] = file_dims[1] = (hsize_t)size; + if (count != NULL) count[0] = count[1] = 1; } @@ -1481,30 +1482,28 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], * This function reproduces this situation. At present the test hangs * on failure. * JRM - 9/13/04 - * - * Changes: None. 
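
For concreteness, get_slab() above hands each rank a contiguous band of whole rows, and check_value() jumps straight to that band by advancing both pointers chunk_origin[0] * size elements before comparing. With a hypothetical size of 8 and 4 processes, rank r owns origin {2r, 0} with dims {2, 8}; a trivial self-check of that arithmetic:

#include <assert.h>

/* Sketch: get_slab()'s row-band decomposition for the hypothetical
 * case size = 8, mpi_size = 4: rank r owns rows [2r, 2r + 2). */
static void
slab_math_selfcheck(void)
{
    int size = 8, mpi_size = 4;
    int r;

    for (r = 0; r < mpi_size; r++) {
        assert(r * (size / mpi_size) == 2 * r); /* chunk_origin[0] */
        assert(size / mpi_size == 2);           /* chunk_dims[0]   */
    }
}
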
*/ #define N 4 -void io_mode_confusion(void) +void +io_mode_confusion(void) { /* * HDF5 APIs definitions */ - const int rank = 1; + const int rank = 1; const char *dataset_name = "IntArray"; - hid_t file_id, dset_id; /* file and dataset identifiers */ - hid_t filespace, memspace; /* file and memory dataspace */ - /* identifiers */ - hsize_t dimsf[1]; /* dataset dimensions */ - int data[N] = {1}; /* pointer to data buffer to write */ - hsize_t coord[N] = {0L,1L,2L,3L}; - hid_t plist_id; /* property list identifier */ - herr_t status; - + hid_t file_id, dset_id; /* file and dataset identifiers */ + hid_t filespace, memspace; /* file and memory dataspace */ + /* identifiers */ + hsize_t dimsf[1]; /* dataset dimensions */ + int data[N] = {1}; /* pointer to data buffer to write */ + hsize_t coord[N] = {0L, 1L, 2L, 3L}; + hid_t plist_id; /* property list identifier */ + herr_t status; /* * MPI variables @@ -1512,18 +1511,16 @@ void io_mode_confusion(void) int mpi_size, mpi_rank; - /* * test bed related variables */ - const char * fcn_name = "io_mode_confusion"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; - + const char *fcn_name = "io_mode_confusion"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t *pt; + char *filename; - pt = GetTestParameters(); + pt = GetTestParameters(); filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -1533,183 +1530,154 @@ void io_mode_confusion(void) * Set up file access property list with parallel I/O access */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up property list.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); plist_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((plist_id != -1), "H5Pcreate() failed"); status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL); - VRFY((status >= 0 ), "H5Pset_fapl_mpio() failed"); - + VRFY((status >= 0), "H5Pset_fapl_mpio() failed"); /* * Create a new file collectively and release property list identifier. */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name); file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); - VRFY((file_id >= 0 ), "H5Fcreate() failed"); + VRFY((file_id >= 0), "H5Fcreate() failed"); status = H5Pclose(plist_id); - VRFY((status >= 0 ), "H5Pclose() failed"); - + VRFY((status >= 0), "H5Pclose() failed"); /* * Create the dataspace for the dataset. */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name); - dimsf[0] = N; + dimsf[0] = N; filespace = H5Screate_simple(rank, dimsf, NULL); - VRFY((filespace >= 0 ), "H5Screate_simple() failed."); - + VRFY((filespace >= 0), "H5Screate_simple() failed."); /* * Create the dataset with default properties and close filespace. 
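
The scenario this test provokes, sketched here before the real sequence continues below, is a collective write in which only rank 0 selects anything; every other rank must still enter H5Dwrite() with an explicit empty selection, and a failure shows up as the hang described above. A minimal sketch of the pattern under a collective transfer list (write_rank0_only is hypothetical; the dataset and both dataspaces are assumed already created):

#include "hdf5.h"

/* Sketch: collective write where only rank 0 contributes data. Every
 * rank, selected or not, must make the H5Dwrite() call. */
static herr_t
write_rank0_only(hid_t dset_id, hid_t mspace, hid_t fspace, int mpi_rank, const int *data)
{
    herr_t ret;
    hid_t  dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    if (mpi_rank == 0) {
        H5Sselect_all(mspace);
        H5Sselect_all(fspace);
    }
    else {
        H5Sselect_none(mspace);
        H5Sselect_none(fspace);
    }

    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mspace, fspace, dxpl, data);
    H5Pclose(dxpl);
    return ret;
}
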
*/ - if(verbose ) - HDfprintf(stdout, - "%0d:%s: Creating the dataset, and closing filespace.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name); - dset_id = H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dset_id >= 0 ), "H5Dcreate2() failed"); + dset_id = + H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2() failed"); status = H5Sclose(filespace); - VRFY((status >= 0 ), "H5Sclose() failed"); - + VRFY((status >= 0), "H5Sclose() failed"); - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name); memspace = H5Screate_simple(rank, dimsf, NULL); - VRFY((memspace >= 0 ), "H5Screate_simple() failed."); - + VRFY((memspace >= 0), "H5Screate_simple() failed."); - if(mpi_rank == 0 ) { - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", - mpi_rank, fcn_name); + if (mpi_rank == 0) { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name); status = H5Sselect_all(memspace); - VRFY((status >= 0 ), "H5Sselect_all() failed"); - } else { - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", - mpi_rank, fcn_name); + VRFY((status >= 0), "H5Sselect_all() failed"); + } + else { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name); status = H5Sselect_none(memspace); - VRFY((status >= 0 ), "H5Sselect_none() failed"); + VRFY((status >= 0), "H5Sselect_none() failed"); } - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); MPI_Barrier(MPI_COMM_WORLD); - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name); filespace = H5Dget_space(dset_id); - VRFY((filespace >= 0 ), "H5Dget_space() failed"); - + VRFY((filespace >= 0), "H5Dget_space() failed"); /* select all */ - if(mpi_rank == 0 ) { - if(verbose ) - HDfprintf(stdout, - "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", - mpi_rank, fcn_name); + if (mpi_rank == 0) { + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name); status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord); - VRFY((status >= 0 ), "H5Sselect_elements() failed"); - } else { /* select nothing */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", - mpi_rank, fcn_name); + VRFY((status >= 0), "H5Sselect_elements() failed"); + } + else { /* select nothing */ + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name); status = H5Sselect_none(filespace); - VRFY((status >= 0 ), "H5Sselect_none() failed"); + VRFY((status >= 0), "H5Sselect_none() failed"); } - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name); MPI_Barrier(MPI_COMM_WORLD); - - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name); plist_id = 
H5Pcreate(H5P_DATASET_XFER); - VRFY((plist_id != -1 ), "H5Pcreate() failed"); - + VRFY((plist_id != -1), "H5Pcreate() failed"); - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name); status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); - VRFY((status >= 0 ), "H5Pset_dxpl_mpio() failed"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO); - VRFY((status>= 0),"set independent IO collectively succeeded"); + VRFY((status >= 0), "H5Pset_dxpl_mpio() failed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((status >= 0), "set independent IO collectively succeeded"); } + if (verbose) + HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name); + status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data); - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", - mpi_rank, fcn_name); - - status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, - plist_id, data); - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", - mpi_rank, fcn_name, status); - VRFY((status >= 0 ), "H5Dwrite() failed"); + if (verbose) + HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status); + VRFY((status >= 0), "H5Dwrite() failed"); /* * Close/release resources. */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name); status = H5Dclose(dset_id); - VRFY((status >= 0 ), "H5Dclose() failed"); + VRFY((status >= 0), "H5Dclose() failed"); status = H5Sclose(filespace); - VRFY((status >= 0 ), "H5Dclose() failed"); + VRFY((status >= 0), "H5Dclose() failed"); status = H5Sclose(memspace); - VRFY((status >= 0 ), "H5Sclose() failed"); + VRFY((status >= 0), "H5Sclose() failed"); status = H5Pclose(plist_id); - VRFY((status >= 0 ), "H5Pclose() failed"); + VRFY((status >= 0), "H5Pclose() failed"); status = H5Fclose(file_id); - VRFY((status >= 0 ), "H5Fclose() failed"); + VRFY((status >= 0), "H5Fclose() failed"); - - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); return; @@ -1721,13 +1689,13 @@ void io_mode_confusion(void) /* * At present, the object header code maintains an image of its on disk * representation, which is updates as necessary instead of generating on - * request. + * request. * * Prior to the fix that this test in designed to verify, the image of the * on disk representation was only updated on flush -- not when the object * header was marked clean. * - * This worked perfectly well as long as all writes of a given object + * This worked perfectly well as long as all writes of a given object * header were written from a single process. However, with the implementation * of round robin metadata data writes in parallel HDF5, this is no longer * the case -- it is possible for a given object header to be flushed from @@ -1735,14 +1703,14 @@ void io_mode_confusion(void) * clean in all other processes on each flush. This resulted in NULL or * out of data object header information being written to disk. 
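
One detail worth calling out in the transfer-list setup just above: the dxfer_coll_type branch first requests collective I/O, then for the independent variant of the test uses H5Pset_dxpl_mpio_collective_opt() to perform the low-level transfer independently while keeping the collective call pattern. Condensed into a helper (make_dxpl is hypothetical):

#include "hdf5.h"

/* Sketch: transfer plist that is collective at the API level; the
 * actual data movement is optionally downgraded to independent I/O. */
static hid_t
make_dxpl(int use_independent_io)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    if (use_independent_io)
        H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    return dxpl;
}
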
* - * To repair this, I modified the object header code to update its - * on disk image both on flush on when marked clean. + * To repair this, I modified the object header code to update its + * on disk image both on flush on when marked clean. * * This test is directed at verifying that the fix performs as expected. * * The test functions by creating a HDF5 file with several small datasets, - * and then flushing the file. This should result of at least one of - * the associated object headers being flushed by a process other than + * and then flushing the file. This should result of at least one of + * the associated object headers being flushed by a process other than * process 0. * * Then for each data set, add an attribute and flush the file again. @@ -1752,73 +1720,53 @@ void io_mode_confusion(void) * Open the each of the data sets in turn. If all opens are successful, * the test passes. Otherwise the test fails. * - * Note that this test will probably become irrelevent shortly, when we + * Note that this test will probably become irrelevant shortly, when we * land the journaling modifications on the trunk -- at which point all * cache clients will have to construct on disk images on demand. * - * JRM -- 10/13/10 - * - * Changes: - * Break it into two parts, a writer to write the file and a reader - * the correctness of the writer. AKC -- 2010/10/27 + * JRM -- 10/13/10 */ -#define NUM_DATA_SETS 4 -#define LOCAL_DATA_SIZE 4 -#define LARGE_ATTR_SIZE 256 +#define NUM_DATA_SETS 4 +#define LOCAL_DATA_SIZE 4 +#define LARGE_ATTR_SIZE 256 /* Since all even and odd processes are split into writer and reader comm * respectively, process 0 and 1 in COMM_WORLD become the root process of * the writer and reader comm respectively. */ -#define Writer_Root 0 -#define Reader_Root 1 -#define Reader_wait(mpi_err, xsteps) \ - mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD) -#define Reader_result(mpi_err, xsteps_done) \ +#define Writer_Root 0 +#define Reader_Root 1 +#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD) +#define Reader_result(mpi_err, xsteps_done) \ mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD) -#define Reader_check(mpi_err, xsteps, xsteps_done) \ - { Reader_wait(mpi_err, xsteps); \ - Reader_result(mpi_err, xsteps_done);} +#define Reader_check(mpi_err, xsteps, xsteps_done) \ + { \ + Reader_wait(mpi_err, xsteps); \ + Reader_result(mpi_err, xsteps_done); \ + } /* object names used by both rr_obj_hdr_flush_confusion and * rr_obj_hdr_flush_confusion_reader. 
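
The three macros just defined implement a simple rendezvous over MPI_COMM_WORLD: the writer root broadcasts how many steps are ready to check, and the reader root broadcasts back how many were actually verified. Expanded, one Reader_check() exchange is two broadcasts in which every rank participates:

#include <mpi.h>

/* Sketch: one writer/reader rendezvous, expanded from the macros
 * above. All ranks in MPI_COMM_WORLD must reach both broadcasts. */
static void
rendezvous(int *steps, int *steps_done)
{
    MPI_Bcast(steps, 1, MPI_INT, 0 /* Writer_Root */, MPI_COMM_WORLD);
    MPI_Bcast(steps_done, 1, MPI_INT, 1 /* Reader_Root */, MPI_COMM_WORLD);
}
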
*/ -const char * dataset_name[NUM_DATA_SETS] = - { - "dataset_0", - "dataset_1", - "dataset_2", - "dataset_3" - }; -const char * att_name[NUM_DATA_SETS] = - { - "attribute_0", - "attribute_1", - "attribute_2", - "attribute_3" - }; -const char * lg_att_name[NUM_DATA_SETS] = - { - "large_attribute_0", - "large_attribute_1", - "large_attribute_2", - "large_attribute_3" - }; - -void rr_obj_hdr_flush_confusion(void) +const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"}; +const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"}; +const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2", + "large_attribute_3"}; + +void +rr_obj_hdr_flush_confusion(void) { /* MPI variables */ /* private communicator size and rank */ - int mpi_size; - int mpi_rank; - int mrc; /* mpi error code */ - int is_reader; /* 1 for reader process; 0 for writer process. */ + int mpi_size; + int mpi_rank; + int mrc; /* mpi error code */ + int is_reader; /* 1 for reader process; 0 for writer process. */ MPI_Comm comm; - /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion"; - const hbool_t verbose = FALSE; + const char *fcn_name = "rr_obj_hdr_flush_confusion"; + const hbool_t verbose = FALSE; /* Create two new private communicators from MPI_COMM_WORLD. * Even and odd ranked processes go to comm_writers and comm_readers @@ -1830,10 +1778,10 @@ void rr_obj_hdr_flush_confusion(void) HDassert(mpi_size > 2); is_reader = mpi_rank % 2; - mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split"); + mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); - /* The reader proocesses branches off to do reading + /* The reader processes branches off to do reading * while the writer processes continues to do writing * Whenever writers finish one writing step, including a H5Fflush, * they inform the readers, via MPI_COMM_WORLD, to verify. @@ -1841,32 +1789,33 @@ void rr_obj_hdr_flush_confusion(void) * step. When all steps are done, they inform readers to end. 
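
Because MPI_Comm_split() is keyed by the original world rank, relative order is preserved within each color, so world ranks 0 and 1 become rank 0 of the writer and reader communicators, matching the Writer_Root/Reader_Root roots above. The split in isolation:

#include <mpi.h>

/* Sketch: parity split of MPI_COMM_WORLD into writer (even world
 * ranks) and reader (odd world ranks) communicators. */
static MPI_Comm
split_writers_readers(int *is_reader_out)
{
    int      world_rank;
    MPI_Comm comm;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    *is_reader_out = world_rank % 2; /* color: 0 = writer, 1 = reader */
    MPI_Comm_split(MPI_COMM_WORLD, *is_reader_out, world_rank, &comm);
    return comm;
}
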
*/ if (is_reader) - rr_obj_hdr_flush_confusion_reader(comm); + rr_obj_hdr_flush_confusion_reader(comm); else - rr_obj_hdr_flush_confusion_writer(comm); + rr_obj_hdr_flush_confusion_writer(comm); MPI_Comm_free(&comm); - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); return; } /* rr_obj_hdr_flush_confusion() */ -void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) +void +rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) { - int i; - int j; - hid_t file_id = -1; - hid_t fapl_id = -1; - hid_t dxpl_id = -1; - hid_t att_id[NUM_DATA_SETS]; - hid_t att_space[NUM_DATA_SETS]; - hid_t lg_att_id[NUM_DATA_SETS]; - hid_t lg_att_space[NUM_DATA_SETS]; - hid_t disk_space[NUM_DATA_SETS]; - hid_t mem_space[NUM_DATA_SETS]; - hid_t dataset[NUM_DATA_SETS]; + int i; + int j; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dxpl_id = -1; + hid_t att_id[NUM_DATA_SETS]; + hid_t att_space[NUM_DATA_SETS]; + hid_t lg_att_id[NUM_DATA_SETS]; + hid_t lg_att_space[NUM_DATA_SETS]; + hid_t disk_space[NUM_DATA_SETS]; + hid_t mem_space[NUM_DATA_SETS]; + hid_t dataset[NUM_DATA_SETS]; hsize_t att_size[1]; hsize_t lg_att_size[1]; hsize_t disk_count[1]; @@ -1875,10 +1824,10 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) hsize_t mem_count[1]; hsize_t mem_size[1]; hsize_t mem_start[1]; - herr_t err; - double data[LOCAL_DATA_SIZE]; - double att[LOCAL_DATA_SIZE]; - double lg_att[LARGE_ATTR_SIZE]; + herr_t err; + double data[LOCAL_DATA_SIZE]; + double att[LOCAL_DATA_SIZE]; + double lg_att[LARGE_ATTR_SIZE]; /* MPI variables */ /* world communication size and rank */ @@ -1887,22 +1836,22 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* private communicator size and rank */ int mpi_size; int mpi_rank; - int mrc; /* mpi error code */ + int mrc; /* mpi error code */ /* steps to verify and have been verified */ - int steps = 0; + int steps = 0; int steps_done = 0; /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion_writer"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; + const char *fcn_name = "rr_obj_hdr_flush_confusion_writer"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t *pt; + char *filename; /* * setup test bed related variables: */ - pt = (const H5Ptest_param_t *)GetTestParameters(); + pt = (const H5Ptest_param_t *)GetTestParameters(); filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); @@ -1914,108 +1863,99 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) * Set up file access property list with parallel I/O access */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up property list.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); fapl_id = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); - VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed"); - + VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); /* * Create a new file collectively and release property list identifier. 
*/ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", - mpi_rank, fcn_name, filename); + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename); file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); - VRFY((file_id >= 0 ), "H5Fcreate() failed"); + VRFY((file_id >= 0), "H5Fcreate() failed"); err = H5Pclose(fapl_id); - VRFY((err >= 0 ), "H5Pclose(fapl_id) failed"); - + VRFY((err >= 0), "H5Pclose(fapl_id) failed"); /* * Step 1: create the data sets and write data. */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name); disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size); - mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { disk_space[i] = H5Screate_simple(1, disk_size, NULL); - VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); + VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n"); - dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, - disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT, + H5P_DEFAULT, H5P_DEFAULT); VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n"); } - /* + /* * setup data transfer property list */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); dxpl_id = H5Pcreate(H5P_DATASET_XFER); VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), - "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); + VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - /* - * write data to the data sets + /* + * write data to the data sets */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name); disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); - mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - mem_start[0] = (hsize_t)(0); + mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_start[0] = (hsize_t)(0); - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + for (j = 0; j < LOCAL_DATA_SIZE; j++) { data[j] = (double)(mpi_rank + 1); } - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, - NULL, disk_count, NULL); + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL); VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, - mem_start, NULL, mem_count, NULL); + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], - disk_space[i], dxpl_id, data); + err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data); VRFY((err >= 0), "H5Dwrite(1) failed.\n"); - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - data[j] *= 10.0; + for (j = 0; j < LOCAL_DATA_SIZE; j++) + 
data[j] *= 10.0; } - /* + /* * close the data spaces */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { err = H5Sclose(disk_space[i]); VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); err = H5Sclose(mem_space[i]); @@ -2024,55 +1964,53 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* End of Step 1: create the data sets and write data. */ - /* + /* * flush the metadata cache */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(1) failed.\n"); /* Tell the reader to check the file up to steps. */ steps++; Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); /* * Step 2: write attributes to each dataset */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name); att_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + for (j = 0; j < LOCAL_DATA_SIZE; j++) { att[j] = (double)(j + 1); } - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { att_space[i] = H5Screate_simple(1, att_size, NULL); VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n"); - att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, - att_space[i], H5P_DEFAULT, H5P_DEFAULT); + att_id[i] = + H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT); VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n"); err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att); VRFY((err >= 0), "H5Awrite(1) failed.\n"); - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { + for (j = 0; j < LOCAL_DATA_SIZE; j++) { att[j] /= 10.0; } } /* - * close attribute IDs and spaces + * close attribute IDs and spaces */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", - mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + if (verbose) + HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name); + for (i = 0; i < NUM_DATA_SETS; i++) { err = H5Sclose(att_space[i]); VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n"); err = H5Aclose(att_id[i]); @@ -2081,115 +2019,112 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) /* End of Step 2: write attributes to each dataset */ - /* + /* * flush the metadata cache again */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(2) failed.\n"); /* Tell the reader to check the file up to steps. 
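
Both sides of this test rely on the same deterministic recipe: for dataset i, rank r writes (r + 1) * 10^i into every element of its block, which is why the writer multiplies data[] by 10 after each dataset and the reader later regenerates the expected values the same way, e.g. rank 2 writes 3, 30, 300, 3000 across the four datasets. As a hypothetical helper:

/* Sketch: expected element value for dataset i as written by rank r. */
static double
expected_value(int r, int i)
{
    double v = (double)(r + 1);

    while (i-- > 0)
        v *= 10.0;
    return v; /* e.g. expected_value(2, 3) == 3000.0 */
}
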
*/ steps++; Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); /* * Step 3: write large attributes to each dataset */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: writing large attributes.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name); lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE); - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + for (j = 0; j < LARGE_ATTR_SIZE; j++) { lg_att[j] = (double)(j + 1); } - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL); VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n"); - lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, - lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT); + lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT, + H5P_DEFAULT); VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n"); err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); VRFY((err >= 0), "H5Awrite(2) failed.\n"); - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + for (j = 0; j < LARGE_ATTR_SIZE; j++) { lg_att[j] /= 10.0; } } - + /* Step 3: write large attributes to each dataset */ - /* + /* * flush the metadata cache yet again to clean the object headers. * - * This is an attempt to crate a situation where we have dirty - * object header continuation chunks, but clean opject headers + * This is an attempt to create a situation where we have dirty + * object header continuation chunks, but clean object headers * to verify a speculative bug fix -- it doesn't seem to work, - * but I will leave the code in anyway, as the object header + * but I will leave the code in anyway, as the object header * code is going to change a lot in the near future. */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(3) failed.\n"); /* Tell the reader to check the file up to steps. */ steps++; Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); /* * Step 4: write different large attributes to each dataset */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name); - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + for (j = 0; j < LARGE_ATTR_SIZE; j++) { lg_att[j] = (double)(j + 2); } - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att); VRFY((err >= 0), "H5Awrite(2) failed.\n"); - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { + for (j = 0; j < LARGE_ATTR_SIZE; j++) { lg_att[j] /= 10.0; } } /* End of Step 4: write different large attributes to each dataset */ - /* + /* * flush the metadata cache again */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name); err = H5Fflush(file_id, H5F_SCOPE_GLOBAL); VRFY((err >= 0), "H5Fflush(3) failed.\n"); /* Tell the reader to check the file up to steps. 
*/ steps++; Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); /* Step 5: Close all objects and the file */ /* - * close large attribute IDs and spaces + * close large attribute IDs and spaces */ - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", - mpi_rank, fcn_name); + if (verbose) + HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name); - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { err = H5Sclose(lg_att_space[i]); VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n"); @@ -2197,15 +2132,14 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n"); } - - /* + /* * close the data sets */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name); - for ( i = 0; i < NUM_DATA_SETS; i++ ) { + for (i = 0; i < NUM_DATA_SETS; i++) { err = H5Dclose(dataset[i]); VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); } @@ -2214,65 +2148,66 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm) * close the data transfer property list. */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); err = H5Pclose(dxpl_id); VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); - /* * Close file. */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name); err = H5Fclose(file_id); - VRFY((err >= 0 ), "H5Fclose(1) failed"); - + VRFY((err >= 0), "H5Fclose(1) failed"); + /* End of Step 5: Close all objects and the file */ /* Tell the reader to check the file up to steps. */ steps++; Reader_check(mrc, steps, steps_done); - + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); /* All done. Inform reader to end. 
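
The reader side of this handshake, shown next, loops over the same two broadcasts: it blocks in Reader_wait() until the writer announces a step count, verifies that much of the file, and reports back through Reader_result(); a broadcast of steps == 0 is the shutdown signal for both loops. Its skeleton, with verify_up_to() standing in hypothetically for the open/read/check code:

/* Sketch: the reader's control loop. steps == 0 from the writer means
 * all done; steps_done == 0 is echoed back and both sides exit. */
int steps = -1, steps_done = -1, mrc;

while (steps_done != 0) {
    Reader_wait(mrc, steps);
    steps_done = 0;
    if (steps > 0)
        steps_done = verify_up_to(steps); /* hypothetical verifier */
    Reader_result(mrc, steps_done);
}
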
*/ - steps=0; + steps = 0; Reader_check(mrc, steps, steps_done); + VRFY((MPI_SUCCESS == mrc), "Reader_check failed"); - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); return; } /* rr_obj_hdr_flush_confusion_writer() */ -void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) +void +rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) { - int i; - int j; - hid_t file_id = -1; - hid_t fapl_id = -1; - hid_t dxpl_id = -1; - hid_t lg_att_id[NUM_DATA_SETS]; - hid_t lg_att_type[NUM_DATA_SETS]; - hid_t disk_space[NUM_DATA_SETS]; - hid_t mem_space[NUM_DATA_SETS]; - hid_t dataset[NUM_DATA_SETS]; + int i; + int j; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dxpl_id = -1; + hid_t lg_att_id[NUM_DATA_SETS]; + hid_t lg_att_type[NUM_DATA_SETS]; + hid_t disk_space[NUM_DATA_SETS]; + hid_t mem_space[NUM_DATA_SETS]; + hid_t dataset[NUM_DATA_SETS]; hsize_t disk_count[1]; hsize_t disk_start[1]; hsize_t mem_count[1]; hsize_t mem_size[1]; hsize_t mem_start[1]; - herr_t err; - htri_t tri_err; - double data[LOCAL_DATA_SIZE]; - double data_read[LOCAL_DATA_SIZE]; - double att[LOCAL_DATA_SIZE]; - double att_read[LOCAL_DATA_SIZE]; - double lg_att[LARGE_ATTR_SIZE]; - double lg_att_read[LARGE_ATTR_SIZE]; + herr_t err; + htri_t tri_err; + double data[LOCAL_DATA_SIZE]; + double data_read[LOCAL_DATA_SIZE]; + double att[LOCAL_DATA_SIZE]; + double att_read[LOCAL_DATA_SIZE]; + double lg_att[LARGE_ATTR_SIZE]; + double lg_att_read[LARGE_ATTR_SIZE]; /* MPI variables */ /* world communication size and rank */ @@ -2281,21 +2216,21 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) /* private communicator size and rank */ int mpi_size; int mpi_rank; - int mrc; /* mpi error code */ - int steps = -1; /* How far (steps) to verify the file */ - int steps_done = -1; /* How far (steps) have been verified */ + int mrc; /* mpi error code */ + int steps = -1; /* How far (steps) to verify the file */ + int steps_done = -1; /* How far (steps) have been verified */ /* test bed related variables */ - const char * fcn_name = "rr_obj_hdr_flush_confusion_reader"; - const hbool_t verbose = FALSE; - const H5Ptest_param_t * pt; - char * filename; + const char *fcn_name = "rr_obj_hdr_flush_confusion_reader"; + const hbool_t verbose = FALSE; + const H5Ptest_param_t *pt; + char *filename; /* * setup test bed related variables: */ - pt = (const H5Ptest_param_t *)GetTestParameters(); + pt = (const H5Ptest_param_t *)GetTestParameters(); filename = pt->name; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank); @@ -2305,295 +2240,285 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) /* Repeatedly re-open the file and verify its contents until it is */ /* told to end (when steps=0). */ - while (steps_done != 0){ - Reader_wait(mrc, steps); - VRFY((mrc >= 0), "Reader_wait failed"); - steps_done = 0; - - if (steps > 0 ){ - /* - * Set up file access property list with parallel I/O access - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up property list.\n", - mpi_rank, fcn_name); - - fapl_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); - err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); - VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed"); - - /* - * Create a new file collectively and release property list identifier. 
- */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", - mpi_rank, fcn_name, filename); - - file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); - VRFY((file_id >= 0 ), "H5Fopen() failed"); - err = H5Pclose(fapl_id); - VRFY((err >= 0 ), "H5Pclose(fapl_id) failed"); + while (steps_done != 0) { + Reader_wait(mrc, steps); + VRFY((mrc >= 0), "Reader_wait failed"); + steps_done = 0; + + if (steps > 0) { + /* + * Set up file access property list with parallel I/O access + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name); + + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed"); + err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL); + VRFY((err >= 0), "H5Pset_fapl_mpio() failed"); + + /* + * Create a new file collectively and release property list identifier. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename); + + file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen() failed"); + err = H5Pclose(fapl_id); + VRFY((err >= 0), "H5Pclose(fapl_id) failed"); #if 1 - if (steps >= 1){ - /*=====================================================* - * Step 1: open the data sets and read data. - *=====================================================*/ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: opening the datasets.\n", - mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - dataset[i] = -1; - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); - VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); - disk_space[i] = H5Dget_space(dataset[i]); - VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); - } - - /* - * setup data transfer property list - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); - - dxpl_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); - err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); - VRFY((err >= 0), - "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); - - /* - * read data from the data sets - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); - - disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); - - mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); - - mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); - mem_start[0] = (hsize_t)(0); - - /* set up expected data for verification */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - data[j] = (double)(mpi_rank + 1); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, - NULL, disk_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); - mem_space[i] = H5Screate_simple(1, mem_size, NULL); - VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); - err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, - mem_start, NULL, mem_count, NULL); - VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); - err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], - disk_space[i], dxpl_id, data_read); - VRFY((err >= 0), "H5Dread(1) failed.\n"); - - /* compare read data with expected data */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - if (data_read[j] != data[j]){ - HDfprintf(stdout, - "%0d:%s: Reading datasets value failed in " - "Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, data[j], 
data_read[j]); - nerrors++; - } - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - data[j] *= 10.0; - } - - /* - * close the data spaces - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - err = H5Sclose(disk_space[i]); - VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); - err = H5Sclose(mem_space[i]); - VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); - } - steps_done++; - } - /* End of Step 1: open the data sets and read data. */ + if (steps >= 1) { + /*=====================================================* + * Step 1: open the data sets and read data. + *=====================================================*/ + + if (verbose) + HDfprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + dataset[i] = -1; + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT); + VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n"); + disk_space[i] = H5Dget_space(dataset[i]); + VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n"); + } + + /* + * setup data transfer property list + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n"); + err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); + VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n"); + + /* + * read data from the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name); + + disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank); + + mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE); + + mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE); + mem_start[0] = (hsize_t)(0); + + /* set up expected data for verification */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + data[j] = (double)(mpi_rank + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, + NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n"); + mem_space[i] = H5Screate_simple(1, mem_size, NULL); + VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n"); + err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL); + VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n"); + err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, + data_read); + VRFY((err >= 0), "H5Dread(1) failed.\n"); + + /* compare read data with expected data */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) { + HDfprintf(stdout, + "%0d:%s: Reading datasets value failed in " + "Dataset %d, at position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, data[j], data_read[j]); + nerrors++; + } + for (j = 0; j < LOCAL_DATA_SIZE; j++) + data[j] *= 10.0; + } + + /* + * close the data spaces + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + err = H5Sclose(disk_space[i]); + VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n"); + err = H5Sclose(mem_space[i]); + VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n"); + } + steps_done++; + } + /* End of Step 1: open the data sets and read data. 
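
Note the verification change above: the exact floating-point comparison data_read[j] != data[j] became !H5_DBL_ABS_EQUAL(data_read[j], data[j]). That macro comes from HDF5's private headers and appears to be an absolute-epsilon test; an equivalent standalone check would be:

#include <float.h>
#include <math.h>

/* Sketch: absolute-epsilon double comparison in the spirit of
 * H5_DBL_ABS_EQUAL (assumed to be fabs(x - y) < DBL_EPSILON, per
 * H5private.h). The test values are small integers times powers of
 * ten, so the tolerance only guards against incidental rounding. */
static int
dbl_abs_equal(double x, double y)
{
    return fabs(x - y) < DBL_EPSILON;
}
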
*/ #endif #if 1 - /*=====================================================* - * Step 2: reading attributes from each dataset - *=====================================================*/ - - if (steps >= 2){ - if(verbose ) - HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); - - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - - att[j] = (double)(j + 1); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - hid_t att_id, att_type; - - att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); - VRFY((att_id >= 0), "H5Aopen failed.\n"); - att_type = H5Aget_type(att_id); - VRFY((att_type >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err==0){ - HDfprintf(stdout, - "%0d:%s: Mismatched Attribute type of Dataset %d.\n", - mpi_rank, fcn_name, i); - nerrors++; - }else{ - /* should verify attribute size before H5Aread */ - err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) - if (att_read[j] != att[j]){ - HDfprintf(stdout, - "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, att[j], att_read[j]); - nerrors++; - } - for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) { - - att[j] /= 10.0; - } - } - err = H5Aclose(att_id); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - steps_done++; - } - /* End of Step 2: reading attributes from each dataset */ + /*=====================================================* + * Step 2: reading attributes from each dataset + *=====================================================*/ + + if (steps >= 2) { + if (verbose) + HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name); + + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] = (double)(j + 1); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + hid_t att_id, att_type; + + att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT); + VRFY((att_id >= 0), "H5Aopen failed.\n"); + att_type = H5Aget_type(att_id); + VRFY((att_type >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err == 0) { + HDfprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank, + fcn_name, i); + nerrors++; + } + else { + /* should verify attribute size before H5Aread */ + err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for (j = 0; j < LOCAL_DATA_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) { + HDfprintf(stdout, + "%0d:%s: Mismatched attribute data read in Dataset %d, at position " + "%d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, att[j], att_read[j]); + nerrors++; + } + for (j = 0; j < LOCAL_DATA_SIZE; j++) { + att[j] /= 10.0; + } + } + err = H5Aclose(att_id); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + steps_done++; + } + /* End of Step 2: reading attributes from each dataset */ #endif - #if 1 - /*=====================================================* - * Step 3 or 4: read large attributes from each dataset. - * Step 4 has different attribute value from step 3. - *=====================================================*/ - - if (steps >= 3){ - if(verbose ) - HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); - - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { - - lg_att[j] = (steps==3) ? 
(double)(j + 1) : (double)(j+2); - } - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); - VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); - lg_att_type[i] = H5Aget_type(lg_att_id[i]); - VRFY((err >= 0), "H5Aget_type failed.\n"); - tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); - VRFY((tri_err >= 0), "H5Tequal failed.\n"); - if (tri_err==0){ - HDfprintf(stdout, - "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", - mpi_rank, fcn_name, i); - nerrors++; - }else{ - /* should verify large attribute size before H5Aread */ - err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); - VRFY((err >= 0), "H5Aread failed.\n"); - /* compare read attribute data with expected data */ - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) - if (lg_att_read[j] != lg_att[j]){ - HDfprintf(stdout, - "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n", - mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); - nerrors++; - } - for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) { - - lg_att[j] /= 10.0; - } - } - err = H5Tclose(lg_att_type[i]); - VRFY((err >= 0), "H5Tclose failed.\n"); - err = H5Aclose(lg_att_id[i]); - VRFY((err >= 0), "H5Aclose failed.\n"); - } - /* Both step 3 and 4 use this same read checking code. */ - steps_done = (steps==3) ? 3 : 4; - } - - /* End of Step 3 or 4: read large attributes from each dataset */ + /*=====================================================* + * Step 3 or 4: read large attributes from each dataset. + * Step 4 has different attribute value from step 3. + *=====================================================*/ + + if (steps >= 3) { + if (verbose) + HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name); + + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2); + } + + for (i = 0; i < NUM_DATA_SETS; i++) { + lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT); + VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n"); + lg_att_type[i] = H5Aget_type(lg_att_id[i]); + VRFY((err >= 0), "H5Aget_type failed.\n"); + tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE); + VRFY((tri_err >= 0), "H5Tequal failed.\n"); + if (tri_err == 0) { + HDfprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n", + mpi_rank, fcn_name, i); + nerrors++; + } + else { + /* should verify large attribute size before H5Aread */ + err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read); + VRFY((err >= 0), "H5Aread failed.\n"); + /* compare read attribute data with expected data */ + for (j = 0; j < LARGE_ATTR_SIZE; j++) + if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) { + HDfprintf(stdout, + "%0d:%s: Mismatched large attribute data read in Dataset %d, at " + "position %d: expect %f, got %f.\n", + mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]); + nerrors++; + } + for (j = 0; j < LARGE_ATTR_SIZE; j++) { + + lg_att[j] /= 10.0; + } + } + err = H5Tclose(lg_att_type[i]); + VRFY((err >= 0), "H5Tclose failed.\n"); + err = H5Aclose(lg_att_id[i]); + VRFY((err >= 0), "H5Aclose failed.\n"); + } + /* Both step 3 and 4 use this same read checking code. */ + steps_done = (steps == 3) ? 3 : 4; + } + + /* End of Step 3 or 4: read large attributes from each dataset */ #endif - - /*=====================================================* - * Step 5: read all objects from the file - *=====================================================*/ - if (steps>=5){ - /* nothing extra to verify. 
The file is closed normally. */ - /* Just increment steps_done */ - steps_done++; - } - - /* - * Close the data sets - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing datasets again.\n", - mpi_rank, fcn_name); - - for ( i = 0; i < NUM_DATA_SETS; i++ ) { - if ( dataset[i] >= 0 ) { - err = H5Dclose(dataset[i]); - VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); - } - } - - /* - * close the data transfer property list. - */ - - if(verbose ) - HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); - - err = H5Pclose(dxpl_id); - VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); - - /* - * Close the file - */ - if(verbose) - HDfprintf(stdout, "%0d:%s: closing file again.\n", - mpi_rank, fcn_name); - err = H5Fclose(file_id); - VRFY((err >= 0 ), "H5Fclose(1) failed"); - - } /* else if (steps_done==0) */ - Reader_result(mrc, steps_done); + /*=====================================================* + * Step 5: read all objects from the file + *=====================================================*/ + if (steps >= 5) { + /* nothing extra to verify. The file is closed normally. */ + /* Just increment steps_done */ + steps_done++; + } + + /* + * Close the data sets + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name); + + for (i = 0; i < NUM_DATA_SETS; i++) { + if (dataset[i] >= 0) { + err = H5Dclose(dataset[i]); + VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n"); + } + } + + /* + * close the data transfer property list. + */ + + if (verbose) + HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name); + + err = H5Pclose(dxpl_id); + VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n"); + + /* + * Close the file + */ + if (verbose) + HDfprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name); + err = H5Fclose(file_id); + VRFY((err >= 0), "H5Fclose(1) failed"); + + } /* else if (steps_done==0) */ + Reader_result(mrc, steps_done); } /* end while(1) */ - if(verbose ) + if (verbose) HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name); return; @@ -2608,7 +2533,91 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm) #undef Writer_Root #undef Reader_Root +/* + * Test creating a chunked dataset in parallel in a file with an alignment set + * and an alignment threshold large enough to avoid aligning the chunks but + * small enough that the raw data aggregator will be aligned if it is treated as + * an object that must be aligned by the library + */ +#define CHUNK_SIZE 72 +#define NCHUNKS 32 +#define AGGR_SIZE 2048 +#define EXTRA_ALIGN 100 + +void +chunk_align_bug_1(void) +{ + int mpi_rank; + hid_t file_id, dset_id, fapl_id, dcpl_id, space_id; + hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE; + h5_stat_size_t file_size; + hsize_t align; + herr_t ret; + const char *filename; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + filename = (const char *)GetTestParameters(); + + /* Create file without alignment */ + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); + VRFY((fapl_id >= 0), "create_faccess_plist succeeded"); + file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* Close file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + + /* Get file size */ + file_size = h5_get_file_size(filename, fapl_id); + VRFY((file_size >= 0), "h5_get_file_size succeeded"); + + /* Calculate alignment value, set to allow a chunk to squeak in between the + * original EOF and the aligned location of the aggregator. 
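
To spell out the trap being set with the constants above: H5Pset_alignment() aligns only file allocations of at least the threshold size, so a threshold of CHUNK_SIZE + 1 = 73 bytes exempts every 72-byte chunk, while the 2048-byte small-data aggregator block would land on the much larger alignment boundary if the library wrongly treated it as an alignable allocation. A condensed sketch of the property-list setup that follows, with file_size standing for the value returned by h5_get_file_size():

/* Sketch: alignment tuned so chunks dodge it but the raw-data
 * aggregator would not, if the library aligned it by mistake. */
hsize_t align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;

H5Pset_small_data_block_size(fapl_id, AGGR_SIZE); /* 2048-byte aggregator */
H5Pset_meta_block_size(fapl_id, 0);               /* no metadata aggregator */
H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align); /* threshold: 73 bytes */
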
Add some space + * for the dataset metadata */ + align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN; + + /* Set aggregator size and alignment, disable metadata aggregator */ + HDassert(AGGR_SIZE > CHUNK_SIZE); + ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE); + VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); + ret = H5Pset_meta_block_size(fapl_id, 0); + VRFY((ret >= 0), "H5Pset_meta_block_size succeeded"); + ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align); + VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded"); + + /* Reopen file with new settings */ + file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + /* Create dataset */ + space_id = H5Screate_simple(1, &dims, NULL); + VRFY((space_id >= 0), "H5Screate_simple succeeded"); + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcpl_id >= 0), "H5Pcreate succeeded"); + ret = H5Pset_chunk(dcpl_id, 1, &cdims); + VRFY((ret >= 0), "H5Pset_chunk succeeded"); + dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + VRFY((dset_id >= 0), "H5Dcreate2 succeeded"); + + /* Close ids */ + ret = H5Dclose(dset_id); + VRFY((dset_id >= 0), "H5Dclose succeeded"); + ret = H5Sclose(space_id); + VRFY((space_id >= 0), "H5Sclose succeeded"); + ret = H5Pclose(dcpl_id); + VRFY((dcpl_id >= 0), "H5Pclose succeeded"); + ret = H5Pclose(fapl_id); + VRFY((fapl_id >= 0), "H5Pclose succeeded"); + + /* Close file */ + ret = H5Fclose(file_id); + VRFY((ret >= 0), "H5Fclose succeeded"); + + return; +} /* end chunk_align_bug_1() */ + /*============================================================================= * End of t_mdset.c *===========================================================================*/ - diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c index 7bd2f58..d0400ae 100644 --- a/testpar/t_mpi.c +++ b/testpar/t_mpi.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -30,137 +27,136 @@ #include "testpar.h" /* FILENAME and filenames must have the same number of names */ -const char *FILENAME[2]={ - "MPItest", - NULL}; -char filenames[2][200]; -int nerrors = 0; -hid_t fapl; /* file access property list */ +const char *FILENAME[2] = {"MPItest", NULL}; +char filenames[2][200]; +int nerrors = 0; +hid_t fapl; /* file access property list */ /* protocols */ static int errors_sum(int nerrs); -#define MPIO_TEST_WRITE_SIZE 1024*1024 /* 1 MB */ +#define MPIO_TEST_WRITE_SIZE 1024 * 1024 /* 1 MB */ static int test_mpio_overlap_writes(char *filename) { - int mpi_size, mpi_rank; - MPI_Comm comm; - MPI_Info info = MPI_INFO_NULL; - int color, mrc; - MPI_File fh; - int i; - int vrfyerrs, nerrs; - unsigned char buf[4093]; /* use some prime number for size */ - int bufsize = sizeof(buf); - MPI_Offset stride; - MPI_Offset mpi_off; - MPI_Status mpi_stat; - + int mpi_size, mpi_rank; + MPI_Comm comm; + MPI_Info info = MPI_INFO_NULL; + int color, mrc; + MPI_File fh; + int i; + int vrfyerrs, nerrs; + unsigned char *buf = NULL; + int bufsize; + MPI_Offset stride; + MPI_Offset mpi_off; + MPI_Status mpi_stat; if (VERBOSE_MED) - printf("MPIO independent overlapping writes test on file %s\n", - filename); + HDprintf("MPIO independent overlapping writes test on file %s\n", filename); nerrs = 0; /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* Need at least 2 processes */ if (mpi_size < 2) { - if (MAINPROCESS) - printf("Need at least 2 processes to run MPIO test.\n"); - printf(" -SKIP- \n"); - return 0; + if (MAINPROCESS) + HDprintf("Need at least 2 processes to run MPIO test.\n"); + HDprintf(" -SKIP- \n"); + return 0; + } + + bufsize = 4093; /* use some prime number for size */ + if (NULL == (buf = HDmalloc((size_t)bufsize))) { + if (MAINPROCESS) + HDprintf("couldn't allocate buffer\n"); + return 1; } /* splits processes 0 to n-2 into one comm. and the last one into another */ color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1); - mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm); - VRFY((mrc==MPI_SUCCESS), "Comm_split succeeded"); - - if (color==0){ - /* First n-1 processes (color==0) open a file and write it */ - mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE|MPI_MODE_RDWR, - info, &fh); - VRFY((mrc==MPI_SUCCESS), ""); - - stride = 1; - mpi_off = mpi_rank*stride; - while (mpi_off < MPIO_TEST_WRITE_SIZE){ - /* make sure the write does not exceed the TEST_WRITE_SIZE */ - if (mpi_off+stride > MPIO_TEST_WRITE_SIZE) - stride = MPIO_TEST_WRITE_SIZE - mpi_off; - - /* set data to some trivial pattern for easy verification */ - for (i=0; i<stride; i++) - buf[i] = (unsigned char)(mpi_off+i); - mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, - &mpi_stat); - VRFY((mrc==MPI_SUCCESS), ""); - - /* move the offset pointer to last byte written by all processes */ - mpi_off += (mpi_size - 1 - mpi_rank) * stride; - - /* Increase chunk size without exceeding buffer size. */ - /* Then move the starting offset for next write. 
*/ - stride *= 2; - if (stride > bufsize) - stride = bufsize; - mpi_off += mpi_rank*stride; - } - - /* close file and free the communicator */ - mrc = MPI_File_close(&fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE"); - mrc = MPI_Comm_free(&comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); - - /* sync with the other waiting processes */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after writes"); - }else{ - /* last process waits till writes are done, - * then opens file to verify data. - */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after writes"); - - mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY, - info, &fh); - VRFY((mrc==MPI_SUCCESS), ""); - - stride = bufsize; - for (mpi_off=0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize){ - /* make sure it does not read beyond end of data */ - if (mpi_off+stride > MPIO_TEST_WRITE_SIZE) - stride = MPIO_TEST_WRITE_SIZE - mpi_off; - mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, - &mpi_stat); - VRFY((mrc==MPI_SUCCESS), ""); - vrfyerrs=0; - for (i=0; i<stride; i++){ - unsigned char expected; - expected = (unsigned char)(mpi_off+i); - if ((expected != buf[i]) && - (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) { - printf("proc %d: found data error at [%ld], expect %u, got %u\n", - mpi_rank, (long)(mpi_off+i), expected, buf[i]); - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("proc %d: [more errors ...]\n", mpi_rank); - - nerrs += vrfyerrs; - } - - /* close file and free the communicator */ - mrc = MPI_File_close(&fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE"); - mrc = MPI_Comm_free(&comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); + mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm); + VRFY((mrc == MPI_SUCCESS), "Comm_split succeeded"); + + if (color == 0) { + /* First n-1 processes (color==0) open a file and write it */ + mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh); + VRFY((mrc == MPI_SUCCESS), ""); + + stride = 1; + mpi_off = mpi_rank * stride; + while (mpi_off < MPIO_TEST_WRITE_SIZE) { + /* make sure the write does not exceed the TEST_WRITE_SIZE */ + if (mpi_off + stride > MPIO_TEST_WRITE_SIZE) + stride = MPIO_TEST_WRITE_SIZE - mpi_off; + + /* set data to some trivial pattern for easy verification */ + for (i = 0; i < stride; i++) + buf[i] = (unsigned char)(mpi_off + i); + mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, &mpi_stat); + VRFY((mrc == MPI_SUCCESS), ""); + + /* move the offset pointer to last byte written by all processes */ + mpi_off += (mpi_size - 1 - mpi_rank) * stride; + + /* Increase chunk size without exceeding buffer size. */ + /* Then move the starting offset for next write. */ + stride *= 2; + if (stride > bufsize) + stride = bufsize; + mpi_off += mpi_rank * stride; + } + + /* close file and free the communicator */ + mrc = MPI_File_close(&fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE"); + mrc = MPI_Comm_free(&comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + + /* sync with the other waiting processes */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after writes"); + } + else { + /* last process waits till writes are done, + * then opens file to verify data. 
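The writer/reader role assignment above comes from MPI_Comm_split: ranks 0..n-2 land in one communicator and the last rank in another. A minimal standalone sketch of that idiom (editor's illustration; the printed labels are arbitrary):

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    int world_rank, world_size, color, sub_rank;
    MPI_Comm sub;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* ranks 0..n-2 become writers (color 0); the last rank reads (color 1) */
    color = (world_rank < world_size - 1) ? 0 : 1;
    MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &sub);
    MPI_Comm_rank(sub, &sub_rank);
    printf("world rank %d -> color %d, sub-rank %d\n", world_rank, color, sub_rank);

    MPI_Comm_free(&sub);
    MPI_Finalize();
    return 0;
}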
+ */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after writes"); + + mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY, info, &fh); + VRFY((mrc == MPI_SUCCESS), ""); + + stride = bufsize; + for (mpi_off = 0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize) { + /* make sure it does not read beyond end of data */ + if (mpi_off + stride > MPIO_TEST_WRITE_SIZE) + stride = MPIO_TEST_WRITE_SIZE - mpi_off; + mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE, &mpi_stat); + VRFY((mrc == MPI_SUCCESS), ""); + vrfyerrs = 0; + for (i = 0; i < stride; i++) { + unsigned char expected; + expected = (unsigned char)(mpi_off + i); + if ((expected != buf[i]) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) { + HDprintf("proc %d: found data error at [%ld], expect %u, got %u\n", mpi_rank, + (long)(mpi_off + i), expected, buf[i]); + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("proc %d: [more errors ...]\n", mpi_rank); + + nerrs += vrfyerrs; + } + + /* close file and free the communicator */ + mrc = MPI_File_close(&fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE"); + mrc = MPI_Comm_free(&comm); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); } /* @@ -168,15 +164,17 @@ test_mpio_overlap_writes(char *filename) * before ending this test. */ mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync before leaving test"); + VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); + + HDfree(buf); + return (nerrs); } - -#define MB 1048576 /* 1024*1024 == 2**20 */ -#define GB 1073741824 /* 1024**3 == 2**30 */ -#define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */ -#define FOUR_GB_LESS1 4294967295L /* 2**32 - 1 */ +#define MB 1048576 /* 1024*1024 == 2**20 */ +#define GB 1073741824 /* 1024**3 == 2**30 */ +#define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */ +#define FOUR_GB_LESS1 4294967295L /* 2**32 - 1 */ /* * Verify that MPI_Offset exceeding 2**31 can be computed correctly. * Print any failure as information only, not as an error so that this @@ -191,229 +189,227 @@ test_mpio_overlap_writes(char *filename) static int test_mpio_gb_file(char *filename) { - int mpi_size, mpi_rank; - MPI_Info info = MPI_INFO_NULL; - int mrc; - MPI_File fh; - int i, j, n; - int vrfyerrs; - int writerrs; /* write errors */ - int nerrs; - int ntimes; /* how many times */ - char *buf = NULL; - char expected; - MPI_Offset size; - MPI_Offset mpi_off; - MPI_Offset mpi_off_old; - MPI_Status mpi_stat; - int is_signed, sizeof_mpi_offset; + int mpi_size, mpi_rank; + MPI_Info info = MPI_INFO_NULL; + int mrc; + MPI_File fh; + int i, j, n; + int vrfyerrs; + int writerrs; /* write errors */ + int nerrs; + int ntimes; /* how many times */ + char *buf = NULL; + char expected; + MPI_Offset size; + MPI_Offset mpi_off; + MPI_Offset mpi_off_old; + MPI_Status mpi_stat; + int is_signed, sizeof_mpi_offset; nerrs = 0; /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if (VERBOSE_MED) - printf("MPI_Offset range test\n"); + HDprintf("MPI_Offset range test\n"); /* figure out the signness and sizeof MPI_Offset */ - mpi_off = 0; - is_signed = ((MPI_Offset)(mpi_off - 1)) < 0; + mpi_off = 0; + is_signed = ((MPI_Offset)(mpi_off - 1)) < 0; sizeof_mpi_offset = (int)(sizeof(MPI_Offset)); /* * Verify the sizeof MPI_Offset and correctness of handling multiple GB * sizes. 
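The signedness probe used in this test is worth seeing on its own: casting (MPI_Offset)(0 - 1) wraps to -1 exactly when the type is signed. A minimal standalone sketch (editor's illustration):

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    MPI_Offset off = 0;
    int is_signed = ((MPI_Offset)(off - 1)) < 0; /* wraps to -1 iff signed */

    MPI_Init(&argc, &argv);
    printf("MPI_Offset is a %s %d-byte integer type\n",
           is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset));
    MPI_Finalize();
    return 0;
}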
*/ - if (MAINPROCESS){ /* only process 0 needs to check it*/ - printf("MPI_Offset is %s %d bytes integeral type\n", - is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset)); - if (sizeof_mpi_offset <= 4 && is_signed){ - printf("Skipped 2GB range test " - "because MPI_Offset cannot support it\n"); - }else { - /* verify correctness of assigning 2GB sizes */ - mpi_off = 2 * 1024 * (MPI_Offset)MB; - INFO((mpi_off>0), "2GB OFFSET assignment no overflow"); - INFO((mpi_off-1)==TWO_GB_LESS1, "2GB OFFSET assignment succeed"); - - /* verify correctness of increasing from below 2 GB to above 2GB */ - mpi_off = TWO_GB_LESS1; - for (i=0; i < 3; i++){ - mpi_off_old = mpi_off; - mpi_off = mpi_off + 1; - /* no overflow */ - INFO((mpi_off>0), "2GB OFFSET increment no overflow"); - /* correct inc. */ - INFO((mpi_off-1)==mpi_off_old, "2GB OFFSET increment succeed"); - } - } - - if (sizeof_mpi_offset <= 4){ - printf("Skipped 4GB range test " - "because MPI_Offset cannot support it\n"); - }else { - /* verify correctness of assigning 4GB sizes */ - mpi_off = 4 * 1024 * (MPI_Offset)MB; - INFO((mpi_off>0), "4GB OFFSET assignment no overflow"); - INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed"); - - /* verify correctness of increasing from below 4 GB to above 4 GB */ - mpi_off = FOUR_GB_LESS1; - for (i=0; i < 3; i++){ - mpi_off_old = mpi_off; - mpi_off = mpi_off + 1; - /* no overflow */ - INFO((mpi_off>0), "4GB OFFSET increment no overflow"); - /* correct inc. */ - INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed"); - } - } + if (MAINPROCESS) { /* only process 0 needs to check it*/ + HDprintf("MPI_Offset is %s %d bytes integral type\n", is_signed ? "signed" : "unsigned", + (int)sizeof(MPI_Offset)); + if (sizeof_mpi_offset <= 4 && is_signed) { + HDprintf("Skipped 2GB range test " + "because MPI_Offset cannot support it\n"); + } + else { + /* verify correctness of assigning 2GB sizes */ + mpi_off = 2 * 1024 * (MPI_Offset)MB; + INFO((mpi_off > 0), "2GB OFFSET assignment no overflow"); + INFO((mpi_off - 1) == TWO_GB_LESS1, "2GB OFFSET assignment succeed"); + + /* verify correctness of increasing from below 2 GB to above 2GB */ + mpi_off = TWO_GB_LESS1; + for (i = 0; i < 3; i++) { + mpi_off_old = mpi_off; + mpi_off = mpi_off + 1; + /* no overflow */ + INFO((mpi_off > 0), "2GB OFFSET increment no overflow"); + /* correct inc. */ + INFO((mpi_off - 1) == mpi_off_old, "2GB OFFSET increment succeed"); + } + } + + if (sizeof_mpi_offset <= 4) { + HDprintf("Skipped 4GB range test " + "because MPI_Offset cannot support it\n"); + } + else { + /* verify correctness of assigning 4GB sizes */ + mpi_off = 4 * 1024 * (MPI_Offset)MB; + INFO((mpi_off > 0), "4GB OFFSET assignment no overflow"); + INFO((mpi_off - 1) == FOUR_GB_LESS1, "4GB OFFSET assignment succeed"); + + /* verify correctness of increasing from below 4 GB to above 4 GB */ + mpi_off = FOUR_GB_LESS1; + for (i = 0; i < 3; i++) { + mpi_off_old = mpi_off; + mpi_off = mpi_off + 1; + /* no overflow */ + INFO((mpi_off > 0), "4GB OFFSET increment no overflow"); + /* correct inc. */ + INFO((mpi_off - 1) == mpi_off_old, "4GB OFFSET increment succeed"); + } + } } /* * Verify if we can write to a file of multiple GB sizes. */ if (VERBOSE_MED) - printf("MPIO GB file test %s\n", filename); - - if (sizeof_mpi_offset <= 4){ - printf("Skipped GB file range test " - "because MPI_Offset cannot support it\n"); - }else{ - buf = HDmalloc(MB); - VRFY((buf!=NULL), "malloc succeed"); - - /* open a new file. Remove it first in case it exists. 
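The delete-then-barrier idiom used here exists because MPI_File_open has no truncate mode, so a stale file from a previous run must be removed first, and the barrier keeps a fast rank from opening before a slow rank's delete lands. A standalone sketch (editor's illustration; the filename "gbfile.dat" is invented):

#include <mpi.h>

int
main(int argc, char **argv)
{
    MPI_File fh;

    MPI_Init(&argc, &argv);
    /* no truncate mode in MPI_File_open, so remove any stale file first;
     * a failure (e.g. file absent) is deliberately ignored */
    MPI_File_delete("gbfile.dat", MPI_INFO_NULL);
    /* prevent a racing condition between delete and open */
    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_open(MPI_COMM_WORLD, "gbfile.dat",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}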
*/ - /* Must delete because MPI_File_open does not have a Truncate mode. */ - /* Don't care if it has error. */ - MPI_File_delete(filename, MPI_INFO_NULL); - MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */ - - mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR, - info, &fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN"); - - printf("MPIO GB file write test %s\n", filename); - - /* instead of writing every bytes of the file, we will just write - * some data around the 2 and 4 GB boundaries. That should cover - * potential integer overflow and filesystem size limits. - */ - writerrs = 0; - for (n=2; n <= 4; n+=2){ - ntimes = GB/MB*n/mpi_size + 1; - for (i=ntimes-2; i <= ntimes; i++){ - mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB; - if (VERBOSE_MED) - HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n", - mpi_rank, mpi_off, mpi_off); - /* set data to some trivial pattern for easy verification */ - for (j=0; j<MB; j++) - *(buf+j) = i*mpi_size + mpi_rank; - if (VERBOSE_MED) - HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n", - mpi_rank, MB, mpi_off); - mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); - INFO((mrc==MPI_SUCCESS), "GB size file write"); - if (mrc!=MPI_SUCCESS) - writerrs++; - } - } - - /* close file and free the communicator */ - mrc = MPI_File_close(&fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE"); - - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after writes"); - - /* - * Verify if we can read the multiple GB file just created. - */ - /* open it again to verify the data written */ - /* but only if there was no write errors */ - printf("MPIO GB file read test %s\n", filename); - if (errors_sum(writerrs)>0){ - printf("proc %d: Skip read test due to previous write errors\n", - mpi_rank); - goto finish; - } - mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); - VRFY((mrc==MPI_SUCCESS), ""); - - /* Only read back parts of the file that have been written. */ - for (n=2; n <= 4; n+=2){ - ntimes = GB/MB*n/mpi_size + 1; - for (i=ntimes-2; i <= ntimes; i++){ - mpi_off = (i*mpi_size + (mpi_size - mpi_rank - 1))*(MPI_Offset)MB; - if (VERBOSE_MED) - HDfprintf(stdout,"proc %d: read from mpi_off=%016llx, %lld\n", - mpi_rank, mpi_off, mpi_off); - mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); - INFO((mrc==MPI_SUCCESS), "GB size file read"); - expected = i*mpi_size + (mpi_size - mpi_rank - 1); - vrfyerrs=0; - for (j=0; j<MB; j++){ - if ((*(buf+j) != expected) && - (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)){ - printf("proc %d: found data error at [%ld+%d], expect %d, got %d\n", - mpi_rank, (long)mpi_off, j, expected, *(buf+j)); - } - } - if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - printf("proc %d: [more errors ...]\n", mpi_rank); - - nerrs += vrfyerrs; - } - } - - /* close file and free the communicator */ - mrc = MPI_File_close(&fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE"); - - /* - * one more sync to ensure all processes have done reading - * before ending this test. 
- */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync before leaving test"); - - printf("Test if MPI_File_get_size works correctly with %s\n", filename); - - mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); - VRFY((mrc==MPI_SUCCESS), ""); - - if (MAINPROCESS){ /* only process 0 needs to check it*/ + HDprintf("MPIO GB file test %s\n", filename); + + if (sizeof_mpi_offset <= 4) { + HDprintf("Skipped GB file range test " + "because MPI_Offset cannot support it\n"); + } + else { + buf = (char *)HDmalloc(MB); + VRFY((buf != NULL), "malloc succeed"); + + /* open a new file. Remove it first in case it exists. */ + /* Must delete because MPI_File_open does not have a Truncate mode. */ + /* Don't care if it has error. */ + MPI_File_delete(filename, MPI_INFO_NULL); + MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */ + + mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN"); + + HDprintf("MPIO GB file write test %s\n", filename); + + /* instead of writing every bytes of the file, we will just write + * some data around the 2 and 4 GB boundaries. That should cover + * potential integer overflow and filesystem size limits. + */ + writerrs = 0; + for (n = 2; n <= 4; n += 2) { + ntimes = GB / MB * n / mpi_size + 1; + for (i = ntimes - 2; i <= ntimes; i++) { + mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset)MB; + if (VERBOSE_MED) + HDfprintf(stdout, "proc %d: write to mpi_off=%016llx, %lld\n", mpi_rank, mpi_off, + mpi_off); + /* set data to some trivial pattern for easy verification */ + for (j = 0; j < MB; j++) + *(buf + j) = (int8_t)(i * mpi_size + mpi_rank); + if (VERBOSE_MED) + HDfprintf(stdout, "proc %d: writing %d bytes at offset %lld\n", mpi_rank, MB, mpi_off); + mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); + INFO((mrc == MPI_SUCCESS), "GB size file write"); + if (mrc != MPI_SUCCESS) + writerrs++; + } + } + + /* close file and free the communicator */ + mrc = MPI_File_close(&fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE"); + + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after writes"); + + /* + * Verify if we can read the multiple GB file just created. + */ + /* open it again to verify the data written */ + /* but only if there was no write errors */ + HDprintf("MPIO GB file read test %s\n", filename); + if (errors_sum(writerrs) > 0) { + HDprintf("proc %d: Skip read test due to previous write errors\n", mpi_rank); + goto finish; + } + mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); + VRFY((mrc == MPI_SUCCESS), ""); + + /* Only read back parts of the file that have been written. 
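The offset arithmetic used by the write and read-back loops is compact but dense. The sketch below (editor's illustration; the process count of 4 is an assumption) evaluates (i * mpi_size + rank) MB for the three i values the test visits, showing that the MB-sized blocks straddle the 2-GB and 4-GB marks:

#include <stdio.h>

#define MB 1048576LL
#define GB 1073741824LL

int
main(void)
{
    long long mpi_size = 4; /* assumed process count, purely for illustration */
    long long n, i, rank, ntimes;

    for (n = 2; n <= 4; n += 2) {
        ntimes = GB / MB * n / mpi_size + 1;
        for (i = ntimes - 2; i <= ntimes; i++)
            for (rank = 0; rank < mpi_size; rank++)
                printf("n=%lld GB, i=%lld, rank=%lld -> block at %lld MB\n",
                       n, i, rank, i * mpi_size + rank);
    }
    return 0;
}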
*/ + for (n = 2; n <= 4; n += 2) { + ntimes = GB / MB * n / mpi_size + 1; + for (i = ntimes - 2; i <= ntimes; i++) { + mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1)) * (MPI_Offset)MB; + if (VERBOSE_MED) + HDfprintf(stdout, "proc %d: read from mpi_off=%016llx, %lld\n", mpi_rank, mpi_off, + mpi_off); + mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat); + INFO((mrc == MPI_SUCCESS), "GB size file read"); + expected = (int8_t)(i * mpi_size + (mpi_size - mpi_rank - 1)); + vrfyerrs = 0; + for (j = 0; j < MB; j++) { + if ((*(buf + j) != expected) && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) { + HDprintf("proc %d: found data error at [%ld+%d], expect %d, got %d\n", mpi_rank, + (long)mpi_off, j, expected, *(buf + j)); + } + } + if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) + HDprintf("proc %d: [more errors ...]\n", mpi_rank); + + nerrs += vrfyerrs; + } + } + + /* close file and free the communicator */ + mrc = MPI_File_close(&fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE"); + + /* + * one more sync to ensure all processes have done reading + * before ending this test. + */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); + + HDprintf("Test if MPI_File_get_size works correctly with %s\n", filename); + + mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh); + VRFY((mrc == MPI_SUCCESS), ""); + + if (MAINPROCESS) { /* only process 0 needs to check it*/ mrc = MPI_File_get_size(fh, &size); - VRFY((mrc==MPI_SUCCESS), ""); - VRFY((size == mpi_off+MB), "MPI_File_get_size doesn't return correct file size."); + VRFY((mrc == MPI_SUCCESS), ""); + VRFY((size == mpi_off + MB), "MPI_File_get_size doesn't return correct file size."); } - /* close file and free the communicator */ - mrc = MPI_File_close(&fh); - VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE"); + /* close file and free the communicator */ + mrc = MPI_File_close(&fh); + VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE"); - /* - * one more sync to ensure all processes have done reading - * before ending this test. - */ - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync before leaving test"); + /* + * one more sync to ensure all processes have done reading + * before ending this test. + */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync before leaving test"); } finish: if (buf) - HDfree(buf); + HDfree(buf); return (nerrs); } - /* * MPI-IO Test: One writes, Many reads. * Verify if only one process writes some data and then all other * processes can read them back correctly. This tests if the - * underlaying parallel I/O and file system supports parallel I/O + * underlying parallel I/O and file system supports parallel I/O * correctly. * * Algorithm: Only one process (e.g., process 0) writes some data. @@ -426,191 +422,189 @@ finish: * Each process writes something, then reads all data back. */ -#define DIMSIZE 32 /* Dimension size. */ -#define PRINTID printf("Proc %d: ", mpi_rank) -#define USENONE 0 -#define USEATOM 1 /* request atomic I/O */ -#define USEFSYNC 2 /* request file_sync */ - +#define DIMSIZE 32 /* Dimension size. 
*/ +#define PRINTID HDprintf("Proc %d: ", mpi_rank) +#define USENONE 0 +#define USEATOM 1 /* request atomic I/O */ +#define USEFSYNC 2 /* request file_sync */ static int test_mpio_1wMr(char *filename, int special_request) { - char hostname[128]; - int mpi_size, mpi_rank; - MPI_File fh; - char mpi_err_str[MPI_MAX_ERROR_STRING]; - int mpi_err_strlen; - int mpi_err; + char hostname[128]; + int mpi_size, mpi_rank; + MPI_File fh; + char mpi_err_str[MPI_MAX_ERROR_STRING]; + int mpi_err_strlen; + int mpi_err; unsigned char writedata[DIMSIZE], readdata[DIMSIZE]; unsigned char expect_val; - int i, irank; - int nerrs = 0; /* number of errors */ - int atomicity; - MPI_Offset mpi_off; - MPI_Status mpi_stat; + int i, irank; + int nerrs = 0; /* number of errors */ + int atomicity; + MPI_Offset mpi_off; + MPI_Status mpi_stat; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if (MAINPROCESS && VERBOSE_MED){ - printf("Testing one process writes, all processes read.\n"); - printf("Using %d processes accessing file %s\n", mpi_size, filename); - printf(" (Filename can be specified via program argument)\n"); + if (MAINPROCESS && VERBOSE_MED) { + HDprintf("Testing one process writes, all processes read.\n"); + HDprintf("Using %d processes accessing file %s\n", mpi_size, filename); + HDprintf(" (Filename can be specified via program argument)\n"); } /* show the hostname so that we can tell where the processes are running */ - if (VERBOSE_DEF){ - if (gethostname(hostname, 128) < 0){ - PRINTID; - printf("gethostname failed\n"); - return 1; - } - PRINTID; - printf("hostname=%s\n", hostname); + if (VERBOSE_DEF) { +#ifdef H5_HAVE_GETHOSTNAME + if (HDgethostname(hostname, sizeof(hostname)) < 0) { + HDprintf("gethostname failed\n"); + hostname[0] = '\0'; + } +#else + HDprintf("gethostname unavailable\n"); + hostname[0] = '\0'; +#endif + PRINTID; + HDprintf("hostname=%s\n", hostname); } /* Delete any old file in order to start anew. */ /* Must delete because MPI_File_open does not have a Truncate mode. */ /* Don't care if it has error. */ MPI_File_delete(filename, MPI_INFO_NULL); - MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */ - - if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, - MPI_MODE_RDWR | MPI_MODE_CREATE , - MPI_INFO_NULL, &fh)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_open failed (%s)\n", mpi_err_str); - return 1; - } + MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */ -if (special_request & USEATOM){ - /* ================================================== - * Set atomcity to true (1). A POSIX compliant filesystem - * should not need this. 
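The USEATOM branch below toggles MPI-IO atomic mode with the get/set atomicity pair. A minimal standalone sketch of the same calls (editor's illustration; the filename "atom.dat" is invented):

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    MPI_File fh;
    int atom = -1;

    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_WORLD, "atom.dat",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    MPI_File_get_atomicity(fh, &atom); /* usually 0 (off) by default */
    printf("atomicity before: %d\n", atom);
    MPI_File_set_atomicity(fh, 1);     /* request atomic mode */
    MPI_File_get_atomicity(fh, &atom);
    printf("atomicity after:  %d\n", atom);
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}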
- * ==================================================*/ - if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); - } - if (VERBOSE_HI) - printf("Initial atomicity = %d\n", atomicity); - if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str); + if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, + &fh)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); + return 1; } - if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); + + if (special_request & USEATOM) { + /* ================================================== + * Set atomcity to true (1). A POSIX compliant filesystem + * should not need this. + * ==================================================*/ + if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); + } + if (VERBOSE_HI) + HDprintf("Initial atomicity = %d\n", atomicity); + if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str); + } + if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str); + } + if (VERBOSE_HI) + HDprintf("After set_atomicity atomicity = %d\n", atomicity); } - if (VERBOSE_HI) - printf("After set_atomicity atomicity = %d\n", atomicity); -} /* This barrier is not necessary but do it anyway. */ MPI_Barrier(MPI_COMM_WORLD); - if (VERBOSE_HI){ - PRINTID; - printf("between MPI_Barrier and MPI_File_write_at\n"); + if (VERBOSE_HI) { + PRINTID; + HDprintf("between MPI_Barrier and MPI_File_write_at\n"); } /* ================================================== * Each process calculates what to write but * only process irank(0) writes. 
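The handshake that follows the single-rank write, an MPI_Bcast of the writer's return code, both publishes the status and orders the readers after the write. A standalone sketch of the pattern (editor's illustration; the status values are made up):

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    int rank, status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    status = (rank == 0) ? 0 : -1; /* only the writer knows the outcome */
    /* writer broadcasts its return code; readers proceed only afterwards */
    MPI_Bcast(&status, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("rank %d sees writer status %d\n", rank, status);
    MPI_Finalize();
    return 0;
}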
* ==================================================*/ - irank=0; - for (i=0; i < DIMSIZE; i++) - writedata[i] = irank*DIMSIZE + i; - mpi_off = irank*DIMSIZE; + irank = 0; + for (i = 0; i < DIMSIZE; i++) + writedata[i] = (uint8_t)(irank * DIMSIZE + i); + mpi_off = irank * DIMSIZE; /* Only one process writes */ - if (mpi_rank==irank){ - if (VERBOSE_HI){ - PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off); - } - if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, - MPI_BYTE, &mpi_stat)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", - (long) mpi_off, DIMSIZE, mpi_err_str); - return 1; - }; + if (mpi_rank == irank) { + if (VERBOSE_HI) { + PRINTID; + HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off); + } + if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE, &mpi_stat)) != + MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, DIMSIZE, + mpi_err_str); + return 1; + }; }; /* Bcast the return code and */ /* make sure all writing are done before reading. */ MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD); - if (VERBOSE_HI){ - PRINTID; - printf("MPI_Bcast: mpi_err = %d\n", mpi_err); + if (VERBOSE_HI) { + PRINTID; + HDprintf("MPI_Bcast: mpi_err = %d\n", mpi_err); } -if (special_request & USEFSYNC){ - /* ================================================== - * Do a file sync. A POSIX compliant filesystem - * should not need this. - * ==================================================*/ - if (VERBOSE_HI) - printf("Apply MPI_File_sync\n"); - /* call file_sync to force the write out */ - if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_sync failed (%s)\n", mpi_err_str); - } - MPI_Barrier(MPI_COMM_WORLD); - /* call file_sync to force the write out */ - if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_sync failed (%s)\n", mpi_err_str); + if (special_request & USEFSYNC) { + /* ================================================== + * Do a file sync. A POSIX compliant filesystem + * should not need this. + * ==================================================*/ + if (VERBOSE_HI) + HDprintf("Apply MPI_File_sync\n"); + /* call file_sync to force the write out */ + if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str); + } + MPI_Barrier(MPI_COMM_WORLD); + /* call file_sync to force the write out */ + if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str); + } } -} /* This barrier is not necessary because the Bcase or File_sync above */ /* should take care of it. Do it anyway. */ MPI_Barrier(MPI_COMM_WORLD); - if (VERBOSE_HI){ - PRINTID; - printf("after MPI_Barrier\n"); + if (VERBOSE_HI) { + PRINTID; + HDprintf("after MPI_Barrier\n"); } /* ================================================== * Each process reads what process 0 wrote and verify. 
* ==================================================*/ - irank=0; - mpi_off = irank*DIMSIZE; - if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE, - &mpi_stat)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - PRINTID; - printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n", - (long) mpi_off, DIMSIZE, mpi_err_str); - return 1; + irank = 0; + mpi_off = irank * DIMSIZE; + if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + PRINTID; + HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, DIMSIZE, + mpi_err_str); + return 1; }; - for (i=0; i < DIMSIZE; i++){ - expect_val = irank*DIMSIZE + i; - if (readdata[i] != expect_val){ - PRINTID; - printf("read data[%d:%d] got %02x, expect %02x\n", irank, i, - readdata[i], expect_val); - nerrs++; - } + for (i = 0; i < DIMSIZE; i++) { + expect_val = (uint8_t)(irank * DIMSIZE + i); + if (readdata[i] != expect_val) { + PRINTID; + HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i, readdata[i], expect_val); + nerrs++; + } } MPI_File_close(&fh); - if (VERBOSE_HI){ - PRINTID; - printf("%d data errors detected\n", nerrs); + if (VERBOSE_HI) { + PRINTID; + HDprintf("%d data errors detected\n", nerrs); } mpi_err = MPI_Barrier(MPI_COMM_WORLD); @@ -619,131 +613,129 @@ if (special_request & USEFSYNC){ /* -Function: test_mpio_derived_dtype - -Test Whether the Displacement of MPI derived datatype -(+ File_set_view + MPI_write)works or not on this MPI-IO package -and this platform. - -1. Details for the test: -1) Create two derived datatypes with MPI_Type_hindexed: - datatype1: - count = 1, blocklens = 1, offsets = 0, - base type = MPI_BYTE(essentially a char) - datatype2: - count = 1, blocklens = 1, offsets = 1(byte), - base type = MPI_BYTE - -2) Using these two derived datatypes, - Build another derived datatype with MPI_Type_struct: - advtype: derived from datatype1 and datatype2 - advtype: - count = 2, blocklens[0] = 1, blocklens[1]=1, - offsets[0] = 0, offsets[1] = 1(byte), - bas_type[0]=datatype1, - bas_type[1] = datatype2; - -3) Setting MPI file view with advtype -4) Writing 2 bytes 1 to 2 using MPI_File_write to a file -5) File content: -Suppose the fill value of the file is 0(most machines indeed do so) -and Fill value is embraced with "() in the following output: -Expected output should be: -1,0,2 - - - -However, at some platforms, for example, IBM AIX(at March 23rd, 2005): -the following values were obtained: -1,2,0 - -The problem is that the displacement of the second derived datatype(datatype2) which formed the final derived datatype(advtype) - has been put after the basic datatype(MPI_BYTE) of datatype2. This is a bug. - - -2. This test will verify whether the complicated derived datatype is working on -the current platform. - -If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change -the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections. - -If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that -we can turn off collective IO support for irregular selections. 
-*/ - -static int test_mpio_derived_dtype(char *filename) { - - MPI_File fh; - char mpi_err_str[MPI_MAX_ERROR_STRING]; - int mpi_err_strlen; - int mpi_err; - int i; - int nerrors = 0; /* number of errors */ - MPI_Datatype etype,filetype; - MPI_Datatype adv_filetype,bas_filetype[2]; - MPI_Datatype etypenew, filetypenew; - MPI_Offset disp; - MPI_Status Status; - MPI_Aint adv_disp[2]; - MPI_Aint offsets[1]; - int blocklens[1],adv_blocklens[2]; - int count,outcount; - int retcode; - - int mpi_rank,mpi_size; - - char buf[3],outbuf[3] = {0}; + Function: test_mpio_derived_dtype + + Test whether the displacement of an MPI derived datatype + (+ File_set_view + MPI_write) works on this MPI-IO package + and this platform. + + 1. Details for the test: + 1) Create two derived datatypes with MPI_Type_create_hindexed: + datatype1: + count = 1, blocklens = 1, offsets = 0, + base type = MPI_BYTE (essentially a char) + datatype2: + count = 1, blocklens = 1, offsets = 1 (byte), + base type = MPI_BYTE + + 2) Using these two derived datatypes, + build another derived datatype with MPI_Type_create_struct: + advtype: derived from datatype1 and datatype2 + advtype: + count = 2, blocklens[0] = 1, blocklens[1] = 1, + offsets[0] = 0, offsets[1] = 1 (byte), + bas_type[0] = datatype1, + bas_type[1] = datatype2; + + 3) Set the MPI file view with advtype + 4) Write 2 bytes, values 1 and 2, using MPI_File_write + 5) File content: + Suppose the fill value of the file is 0 (most machines indeed use 0), + so unwritten bytes read back as 0. The expected output is: + 1,0,2 + + However, on some platforms, for example IBM AIX (as of March 23rd, 2005), + the following values were obtained: + 1,2,0 + + The problem is that the displacement of the second derived datatype (datatype2), which forms part of the final + derived datatype (advtype), has been applied after the basic datatype (MPI_BYTE) of datatype2. This is a bug. + + 2. This test will verify whether this complicated derived datatype works on + the current platform. + + If this bug has been fixed in a previously not-working package, this test will issue an HDprintf message to + tell the developer to change the configuration-specific file of HDF5 so that we can change our + configuration setting to support collective IO for irregular selections. + + If it turns out that a previously working MPI-IO package no longer works, this test will also issue a message + to report the failure so that we can turn off collective IO support for irregular selections.
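The hindexed-plus-struct construction described above can be reduced to a small standalone program. The sketch below is an editor's illustration, not the test itself; the filename "dtype.dat" is invented, and only rank 0 writes so that ranks do not overwrite each other. Per the description above, a correct MPI-IO lands the two bytes at file offsets 0 and 2, giving contents 1,0,2:

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    MPI_File     fh;
    MPI_Datatype t1, t2, adv, types[2];
    int          blk[1] = {1}, ablk[2] = {1, 1};
    MPI_Aint     disp0[1] = {0}, disp1[1] = {1}, adisp[2] = {0, 1};
    char         buf[2] = {1, 2};
    int          rank;
    MPI_Status   st;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Type_create_hindexed(1, blk, disp0, MPI_BYTE, &t1); /* datatype1 */
    MPI_Type_create_hindexed(1, blk, disp1, MPI_BYTE, &t2); /* datatype2 */
    types[0] = t1;
    types[1] = t2;
    MPI_Type_create_struct(2, ablk, adisp, types, &adv);    /* advtype */
    MPI_Type_commit(&adv);

    MPI_File_open(MPI_COMM_WORLD, "dtype.dat",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_BYTE, adv, "native", MPI_INFO_NULL);
    if (rank == 0) /* bytes land at file offsets 0 and 2 -> contents 1,0,2 */
        MPI_File_write(fh, buf, 2, MPI_BYTE, &st);
    MPI_File_close(&fh);

    MPI_Type_free(&adv);
    MPI_Type_free(&t2);
    MPI_Type_free(&t1);
    MPI_Finalize();
    return 0;
}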
+ */ + +static int +test_mpio_derived_dtype(char *filename) +{ + + MPI_File fh; + char mpi_err_str[MPI_MAX_ERROR_STRING]; + int mpi_err_strlen; + int mpi_err; + int i; + MPI_Datatype etype, filetype; + MPI_Datatype adv_filetype, bas_filetype[2]; + MPI_Datatype filetypenew; + MPI_Offset disp; + MPI_Status Status; + MPI_Aint adv_disp[2]; + MPI_Aint offsets[1]; + int blocklens[1], adv_blocklens[2]; + int count, outcount; + int retcode; + + int mpi_rank, mpi_size; + + char buf[3], outbuf[3] = {0}; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); retcode = 0; - for(i=0;i<3;i++) - buf[i] = i+1; - + for (i = 0; i < 3; i++) + buf[i] = (char)(i + 1); - if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, - MPI_MODE_RDWR | MPI_MODE_CREATE, - MPI_INFO_NULL, &fh)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_open failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, + &fh)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); + return 1; } disp = 0; etype = MPI_BYTE; - count = 1; + count = 1; blocklens[0] = 1; offsets[0] = 0; - if((mpi_err= MPI_Type_hindexed(count,blocklens,offsets,MPI_BYTE,&filetype)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetype)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err=MPI_Type_commit(&filetype))!=MPI_SUCCESS){ + if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); - return 1; + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); + return 1; } - count = 1; - blocklens[0]=1; - offsets[0] = 1; - if((mpi_err= MPI_Type_hindexed(count,blocklens,offsets,MPI_BYTE,&filetypenew)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); - return 1; + count = 1; + blocklens[0] = 1; + offsets[0] = 1; + if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE, &filetypenew)) != + MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err=MPI_Type_commit(&filetypenew))!=MPI_SUCCESS){ + if ((mpi_err = MPI_Type_commit(&filetypenew)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); - return 1; + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); + return 1; } outcount = 2; @@ -754,242 +746,250 @@ static int test_mpio_derived_dtype(char *filename) { bas_filetype[0] = filetype; bas_filetype[1] = filetypenew; - if((mpi_err= MPI_Type_struct(outcount,adv_blocklens,adv_disp,bas_filetype,&adv_filetype)) - != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_struct failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp, bas_filetype, &adv_filetype)) != + MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, 
&mpi_err_strlen); + HDprintf("MPI_Type_create_struct failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err=MPI_Type_commit(&adv_filetype))!=MPI_SUCCESS){ + if ((mpi_err = MPI_Type_commit(&adv_filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); - return 1; + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); + return 1; } + if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native", MPI_INFO_NULL)) != + MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); + return 1; + } - if((mpi_err = MPI_File_set_view(fh,disp,etype,adv_filetype,"native",MPI_INFO_NULL))!= MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_write failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err = MPI_File_write(fh,buf,3,MPI_BYTE,&Status))!= MPI_SUCCESS){ + if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_write failed (%s)\n", mpi_err_str); - return 1; - ; + HDprintf("MPI_File_close failed (%s)\n", mpi_err_str); + return 1; } + if ((mpi_err = MPI_Type_free(&filetype)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_free failed (%s)\n", mpi_err_str); + return 1; + } - if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_Type_free(&adv_filetype)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_free failed (%s)\n", mpi_err_str); + return 1; } + if ((mpi_err = MPI_Type_free(&filetypenew)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_free failed (%s)\n", mpi_err_str); + return 1; + } - if((mpi_err = MPI_File_open(MPI_COMM_WORLD,filename,MPI_MODE_RDONLY,MPI_INFO_NULL,&fh)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_open failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh)) != + MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err = MPI_File_set_view(fh,0,MPI_BYTE,MPI_BYTE,"native",MPI_INFO_NULL))!= MPI_SUCCESS){ + if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); - return 1; + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); + return 1; } - if((mpi_err = MPI_File_read(fh,outbuf,3,MPI_BYTE,&Status))!=MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_read failed (%s)\n", mpi_err_str); - return 1; + if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_read failed (%s)\n", mpi_err_str); + return 1; } - if(outbuf[2]==2) { - retcode = 0; + if (outbuf[2] == 
2) { + retcode = 0; } else { -/* if(mpi_rank == 0) { - printf("complicated derived datatype is NOT working at this platform\n"); - printf("go back to hdf5/config and find the corresponding\n"); - printf("configure-specific file and change ?????\n"); - } -*/ - retcode = -1; - } - - if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){ - MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed (%s)\n", mpi_err_str); - return 1; + /* if(mpi_rank == 0) { + HDprintf("complicated derived datatype is NOT working at this platform\n"); + HDprintf("go back to hdf5/config and find the corresponding\n"); + HDprintf("configure-specific file and change ?????\n"); + } + */ + retcode = -1; } + if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_File_close failed (%s)\n", mpi_err_str); + return 1; + } mpi_err = MPI_Barrier(MPI_COMM_WORLD); - if(retcode == -1) { - if(mpi_rank == 0) { - printf("Complicated derived datatype is NOT working at this platform\n"); - printf(" Please report to help@hdfgroup.org about this problem.\n"); - } - retcode = 1; + if (retcode == -1) { + if (mpi_rank == 0) { + HDprintf("Complicated derived datatype is NOT working at this platform\n"); + HDprintf(" Please report to help@hdfgroup.org about this problem.\n"); + } + retcode = 1; } return retcode; } /* -Function: test_mpio_special_collective + Function: test_mpio_special_collective -Test Whether collective IO is still working when more than one process -has no contribution to IO. To properly test this case, at least FOUR -processes are needed. + Test whether collective IO still works when more than one process + has no contribution to the IO. To properly test this case, at least FOUR + processes are needed. -1. Details for the test: -1) Create one derived datatype with MPI_Type_hindexed: + 1. Details for the test: + 1) Create one derived datatype with MPI_Type_create_hindexed: -2) Choosing at least two processes to contribute none for IO with - the buf size inside MPI_Write_at_all to 0. -3) Choosing at least two processes to have real contributions for IO. -4) Do collective IO. + 2) Choose at least two processes that contribute nothing to the IO, with + the buf size inside MPI_File_write_at_all set to 0. + 3) Choose at least two processes that have real contributions to the IO. + 4) Do collective IO. -2. This test will fail with the MPI-IO package that doesn't support this. For example, -mpich 1.2.6. + 2. This test will fail with an MPI-IO package that doesn't support this, for example, + mpich 1.2.6. -If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change -the configuration specific file of HDF5 so that we can change our configurationsetting to support special collective IO; currently only special collective IO. + If this bug has been fixed in a previously not-working package, this test will issue an HDprintf message to + tell the developer to change the configuration-specific file of HDF5 so that we can change our + configuration setting to support special collective IO; currently only special collective IO. -If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that -we can turn off the support for special collective IO; currently only special collective IO.
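The essential point of the special-collective case is that every rank must enter the collective call even when it contributes zero bytes. A minimal standalone sketch (editor's illustration; the filename "sc.dat" and the even/odd split are invented):

#include <mpi.h>

int
main(int argc, char **argv)
{
    MPI_File   fh;
    MPI_Status st;
    int        rank, count;
    char       byte;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    byte  = (char)rank;
    count = (rank % 2 == 0) ? 1 : 0; /* odd ranks contribute nothing */

    MPI_File_open(MPI_COMM_WORLD, "sc.dat",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
    /* every rank must still make the collective call, even with count 0 */
    MPI_File_write_at_all(fh, (MPI_Offset)rank, &byte, count, MPI_BYTE, &st);
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}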
-*/ + If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message + to inform the corresponding failure so that we can turn off the support for special collective IO; currently + only special collective IO. + */ static int test_mpio_special_collective(char *filename) { - int mpi_size, mpi_rank; - MPI_File fh; - MPI_Datatype etype,buftype,filetype; - char mpi_err_str[MPI_MAX_ERROR_STRING]; - int mpi_err_strlen; - int mpi_err; - char writedata[2*DIMSIZE]; - char filerep[7] = "native"; - int i; - int count,bufcount; - int blocklens[2]; - MPI_Aint offsets[2]; - MPI_Offset mpi_off = 0; - MPI_Status mpi_stat; - int retcode = 0; + int mpi_size, mpi_rank; + MPI_File fh; + MPI_Datatype etype; + MPI_Datatype filetype = MPI_BYTE; + MPI_Datatype buftype = MPI_BYTE; + char mpi_err_str[MPI_MAX_ERROR_STRING]; + int mpi_err_strlen; + int mpi_err; + char writedata[2 * DIMSIZE]; + char filerep[7] = "native"; + int i; + int count, bufcount; + int blocklens[2]; + MPI_Aint offsets[2]; + MPI_Offset mpi_off = 0; + MPI_Status mpi_stat; + int retcode = 0; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* create MPI data type */ etype = MPI_BYTE; - if(mpi_rank == 0 || mpi_rank == 1) { - count = DIMSIZE; + if (mpi_rank == 0 || mpi_rank == 1) { + count = DIMSIZE; bufcount = 1; } /* end if */ else { - count = 0; + count = 0; bufcount = 0; } /* end else */ blocklens[0] = count; - offsets[0] = mpi_rank*count; + offsets[0] = mpi_rank * count; blocklens[1] = count; - offsets[1] = (mpi_size+mpi_rank)*count; - - if(count !=0) { - if((mpi_err = MPI_Type_hindexed(2, - blocklens, - offsets, - etype, - &filetype)) != MPI_SUCCESS) { + offsets[1] = (mpi_size + mpi_rank) * count; + + if (count != 0) { + if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } /* end if */ - if((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) { + if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } /* end if */ - if((mpi_err = MPI_Type_hindexed(2, - blocklens, - offsets, - etype, - &buftype)) != MPI_SUCCESS) { + if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype, &buftype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str); return 1; } /* end if */ - if((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) { + if ((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_Type_commit failed (%s)\n", mpi_err_str); + HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str); return 1; } /* end if */ - } /* end if */ - else { - filetype = MPI_BYTE; - buftype = MPI_BYTE; - } /* end else */ + } /* end if */ /* Open a file */ - if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, - filename, - MPI_MODE_RDWR | MPI_MODE_CREATE, - MPI_INFO_NULL, + if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - 
printf("MPI_File_open failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_open failed (%s)\n", mpi_err_str); return 1; } /* end if */ /* each process writes some data */ - for (i=0; i < 2*DIMSIZE; i++) - writedata[i] = (char)(mpi_rank*DIMSIZE + i); + for (i = 0; i < 2 * DIMSIZE; i++) + writedata[i] = (char)(mpi_rank * DIMSIZE + i); /* Set the file view */ - if((mpi_err = MPI_File_set_view(fh, - mpi_off, - MPI_BYTE, - filetype, - filerep, - MPI_INFO_NULL)) != MPI_SUCCESS) { + if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep, MPI_INFO_NULL)) != + MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_set_view failed (%s)\n", mpi_err_str); + HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str); return 1; } /* end if */ + if (filetype != MPI_BYTE && (mpi_err = MPI_Type_free(&filetype)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_free failed (%s)\n", mpi_err_str); + return 1; + } + /* Collectively write into the file */ - if ((mpi_err = MPI_File_write_at_all(fh, - mpi_off, - writedata, - bufcount, - buftype, - &mpi_stat)) != MPI_SUCCESS) { + if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount, buftype, &mpi_stat)) != + MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", - (long) mpi_off, bufcount, mpi_err_str); + HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n", (long)mpi_off, bufcount, + mpi_err_str); return 1; } /* end if */ + if (buftype != MPI_BYTE && (mpi_err = MPI_Type_free(&buftype)) != MPI_SUCCESS) { + MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); + HDprintf("MPI_Type_free failed (%s)\n", mpi_err_str); + return 1; + } + /* Close the file */ if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) { MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen); - printf("MPI_File_close failed. \n"); + HDprintf("MPI_File_close failed. 
\n"); return 1; } /* end if */ /* Perform a barrier */ mpi_err = MPI_Barrier(MPI_COMM_WORLD); - if(retcode != 0) { - if(mpi_rank == 0) { - printf("special collective IO is NOT working at this platform\n"); - printf(" Please report to help@hdfgroup.org about this problem.\n"); + if (retcode != 0) { + if (mpi_rank == 0) { + HDprintf("special collective IO is NOT working at this platform\n"); + HDprintf(" Please report to help@hdfgroup.org about this problem.\n"); } /* end if */ retcode = 1; } /* end if */ @@ -1004,72 +1004,74 @@ test_mpio_special_collective(char *filename) static int parse_options(int argc, char **argv) { - while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'v': if (*((*argv+1)+1)) - ParseTestVerbosity((*argv+1)+1); - else - SetTestVerbosity(VERBO_MED); - break; - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; - case 'h': /* print help message--return with nerrors set */ - return(1); - default: nerrors++; - return(1); - } - } + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'v': + if (*((*argv + 1) + 1)) + ParseTestVerbosity((*argv + 1) + 1); + else + SetTestVerbosity(VERBO_MED); + break; + case 'f': + if (--argc < 1) { + nerrors++; + return (1); + } + if (**(++argv) == '-') { + nerrors++; + return (1); + } + paraprefix = *argv; + break; + case 'h': /* print help message--return with nerrors set */ + return (1); + default: + nerrors++; + return (1); + } + } } /*while*/ /* compose the test filenames */ { - int i, n; - hid_t plist; - - plist = H5Pcreate (H5P_FILE_ACCESS); - H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL); - n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i=0; i < n; i++) - if (h5_fixname(FILENAME[i],plist,filenames[i],sizeof(filenames[i])) - == NULL){ - printf("h5_fixname failed\n"); - nerrors++; - return(1); - } - H5Pclose(plist); - if (VERBOSE_MED){ - printf("Test filenames are:\n"); - for (i=0; i < n; i++) - printf(" %s\n", filenames[i]); - } + int i, n; + hid_t plist; + + plist = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL); + n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + + for (i = 0; i < n; i++) + if (h5_fixname(FILENAME[i], plist, filenames[i], sizeof(filenames[i])) == NULL) { + HDprintf("h5_fixname failed\n"); + nerrors++; + return (1); + } + H5Pclose(plist); + if (VERBOSE_MED) { + HDprintf("Test filenames are:\n"); + for (i = 0; i < n; i++) + HDprintf(" %s\n", filenames[i]); + } } - return(0); + return (0); } - /* * Show command usage */ static void usage(void) { - printf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n"); - printf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\n"); + HDprintf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n"); + HDprintf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\n"); } /* @@ -1080,14 +1082,13 @@ errors_sum(int nerrs) { int temp; MPI_Allreduce(&nerrs, &temp, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); - return(temp); + return (temp); } - int main(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables */ + int mpi_size, mpi_rank; /* mpi variables */ int ret_code; MPI_Init(&argc, &argv); @@ -1099,31 +1100,30 @@ main(int argc, char **argv) * hang in the atexit post processing in which it may try to 
make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0){ - printf("Failed to turn off atexit processing. Continue.\n", mpi_rank); + if (H5dont_atexit() < 0) { + HDprintf("Failed to turn off atexit processing. Continue.\n"); }; H5open(); - if (parse_options(argc, argv) != 0){ - if (MAINPROCESS) - usage(); - goto finish; + if (parse_options(argc, argv) != 0) { + if (MAINPROCESS) + usage(); + goto finish; } - if (MAINPROCESS){ - printf("===================================\n"); - printf("MPI functionality tests\n"); - printf("===================================\n"); + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("MPI functionality tests\n"); + HDprintf("===================================\n"); } if (VERBOSE_MED) - h5_show_hostname(); + h5_show_hostname(); - fapl = H5Pcreate (H5P_FILE_ACCESS); + fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* set alarm. */ - ALARM_ON; - + TestAlarmOn(); /*======================================= * MPIO 1 write Many read test @@ -1131,31 +1131,30 @@ main(int argc, char **argv) MPI_BANNER("MPIO 1 write Many read test..."); ret_code = test_mpio_1wMr(filenames[0], USENONE); ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; } /* test atomicity and file sync in high verbose mode only */ /* since they often hang when broken and PHDF5 does not use them. */ - if (VERBOSE_HI){ - MPI_BANNER("MPIO 1 write Many read test with atomicity..."); - ret_code = test_mpio_1wMr(filenames[0], USEATOM); - ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; - } - - MPI_BANNER("MPIO 1 write Many read test with file sync..."); - ret_code = test_mpio_1wMr(filenames[0], USEFSYNC); - ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; - } - } + if (VERBOSE_HI) { + MPI_BANNER("MPIO 1 write Many read test with atomicity..."); + ret_code = test_mpio_1wMr(filenames[0], USEATOM); + ret_code = errors_sum(ret_code); + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; + } + MPI_BANNER("MPIO 1 write Many read test with file sync..."); + ret_code = test_mpio_1wMr(filenames[0], USEFSYNC); + ret_code = errors_sum(ret_code); + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; + } + } /*======================================= * MPIO MPIO File size range test @@ -1164,13 +1163,13 @@ main(int argc, char **argv) #ifndef H5_HAVE_WIN32_API ret_code = test_mpio_gb_file(filenames[0]); ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; } #else - if (mpi_rank==0) - printf(" will be skipped on Windows (JIRA HDDFV-8064)\n"); + if (mpi_rank == 0) + HDprintf(" will be skipped on Windows (JIRA HDDFV-8064)\n"); #endif /*======================================= @@ -1179,9 +1178,9 @@ main(int argc, char **argv) MPI_BANNER("MPIO independent overlapping writes..."); 
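Each test's per-rank error count is combined through errors_sum(), i.e. an MPI_Allreduce sum, so that every rank agrees on the pass/fail verdict. A standalone sketch of that pattern (editor's illustration; the fake error count on rank 1 is invented):

#include <mpi.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
    int rank, nerrs, total;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    nerrs = (rank == 1) ? 3 : 0; /* pretend rank 1 saw three mismatches */
    MPI_Allreduce(&nerrs, &total, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (rank == 0)
        printf("total errors across ranks: %d\n", total);
    MPI_Finalize();
    return 0;
}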
ret_code = test_mpio_overlap_writes(filenames[0]); ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; } /*======================================= @@ -1190,9 +1189,9 @@ main(int argc, char **argv) MPI_BANNER("MPIO complicated derived datatype test..."); ret_code = test_mpio_derived_dtype(filenames[0]); ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; } /*======================================= @@ -1201,7 +1200,7 @@ main(int argc, char **argv) if (mpi_size < 4) { MPI_BANNER("MPIO special collective io test SKIPPED."); if (mpi_rank == 0) - printf("This test needs at least four processes to run.\n"); + HDprintf("This test needs at least four processes to run.\n"); ret_code = 0; goto sc_finish; } /* end if */ @@ -1211,38 +1210,36 @@ main(int argc, char **argv) sc_finish: ret_code = errors_sum(ret_code); - if (mpi_rank==0 && ret_code > 0){ - printf("***FAILED with %d total errors\n", ret_code); - nerrors += ret_code; + if (mpi_rank == 0 && ret_code > 0) { + HDprintf("***FAILED with %d total errors\n", ret_code); + nerrors += ret_code; } - finish: /* make sure all processes are finished before final report, cleanup * and exit. */ MPI_Barrier(MPI_COMM_WORLD); - if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); - if (nerrors){ - printf("***MPI tests detected %d errors***\n", nerrors); - } - else{ - printf("MPI tests finished with no errors\n"); - } - printf("===================================\n"); + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors) { + HDprintf("***MPI tests detected %d errors***\n", nerrors); + } + else { + HDprintf("MPI tests finished with no errors\n"); + } + HDprintf("===================================\n"); } /* turn off alarm */ - ALARM_OFF; + TestAlarmOff(); - h5_cleanup(FILENAME, fapl); + h5_clean_files(FILENAME, fapl); H5close(); /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ MPI_Finalize(); /* cannot just return (nerrors) because exit code is limited to 1byte */ - return(nerrors!=0); + return (nerrors != 0); } - diff --git a/testpar/t_oflush.c b/testpar/t_oflush.c new file mode 100644 index 0000000..4a91be1 --- /dev/null +++ b/testpar/t_oflush.c @@ -0,0 +1,117 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Test for H5Oflush. For the current design, H5Oflush doesn't work correctly + * with parallel. It causes an assertion failure in metadata cache during + * H5Fclose. 
This test makes sure H5Oflush fails for dataset, group, and named + * datatype properly until the problem is solved. */ + +#include "testphdf5.h" +#include "H5Dprivate.h" +#include "H5private.h" + +#define DATASETNAME "IntArray" +#define NX 5 +#define NY 6 +#define RANK 2 + +void +test_oflush(void) +{ + int mpi_size, mpi_rank; + hid_t file, dataset; + hid_t dataspace; + hid_t fapl_id; + const char *filename; + hid_t gid, dtype_flush; + hsize_t dimsf[2]; + herr_t ret; + int data[NX][NY]; + int i, j; + + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + + /* Make sure MPIO driver is used */ + fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO); + VRFY((fapl_id >= 0), "fapl creation succeeded"); + + /* Data buffer initialization */ + for (j = 0; j < NX; j++) + for (i = 0; i < NY; i++) + data[j][i] = i + j; + + filename = GetTestParameters(); + + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file >= 0), "file creation succeeded"); + + /* Describe the size of the array and create the data space for fixed + * size dataset */ + dimsf[0] = NX; + dimsf[1] = NY; + + dataspace = H5Screate_simple(RANK, dimsf, NULL); + VRFY((dataspace >= 0), "data space creation succeeded"); + + /* Create a new dataset within the file using defined dataspace and + * datatype and default dataset creation properties */ + dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dataset >= 0), "dataset creation succeeded"); + + /* Write the data to the dataset using default transfer properties */ + ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data); + VRFY((ret >= 0), "dataset creation succeeded"); + + /* Make sure H5Oflush fails with dataset */ + H5E_BEGIN_TRY + { + ret = H5Oflush(dataset); + } + H5E_END_TRY + VRFY((ret < 0), "H5Oflush should fail as expected"); + + H5Sclose(dataspace); + H5Dclose(dataset); + + /* Create a group */ + gid = H5Gcreate(file, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((gid >= 0), "group creation succeeded"); + + /* Make sure H5Oflush fails with group */ + H5E_BEGIN_TRY + { + ret = H5Oflush(gid); + } + H5E_END_TRY + VRFY((ret < 0), "H5Oflush should fail as expected"); + + H5Gclose(gid); + + /* Create a named datatype */ + dtype_flush = H5Tcopy(H5T_NATIVE_INT); + H5Tcommit(file, "dtype", dtype_flush, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + + /* Make sure H5Oflush fails with named datatype */ + H5E_BEGIN_TRY + { + ret = H5Oflush(dtype_flush); + } + H5E_END_TRY + VRFY((ret < 0), "H5Oflush should fail as expected"); + + H5Tclose(dtype_flush); + + /* Close and release resources */ + H5Fclose(file); + H5Pclose(fapl_id); +} diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c index 1bcfeb8..edfbcfe 100644 --- a/testpar/t_pflush1.c +++ b/testpar/t_pflush1.c @@ -1,202 +1,223 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. 
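Each negative test in t_oflush.c above uses the same error-handling idiom: wrap the call that is expected to fail in H5E_BEGIN_TRY / H5E_END_TRY so the library's error stack stays quiet, then verify that the return value is negative. Here is the pattern in isolation, as a sketch with an illustrative helper name:

    #include "hdf5.h"

    /* Return 0 if H5Oflush fails as expected for this object, 1 otherwise. */
    static int
    expect_oflush_failure(hid_t obj_id)
    {
        herr_t ret;

        /* Suppress error output; failure is the expected outcome here. */
        H5E_BEGIN_TRY
        {
            ret = H5Oflush(obj_id);
        }
        H5E_END_TRY

        return (ret < 0) ? 0 : 1;
    }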
It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * Programmer: Leon Arber <larber@uiuc.edu> * Sept. 28, 2006. * - * Purpose: This is the first half of a two-part test that makes sure - * that a file can be read after a parallel application crashes as long - * as the file was flushed first. We simulate a crash by - * calling _exit(0) since this doesn't flush HDF5 caches but - * still exits with success. + * Purpose: This is the first half of a two-part test that makes sure + * that a file can be read after a parallel application crashes + * as long as the file was flushed first. We simulate a crash by + * calling _exit() since this doesn't flush HDF5 caches but + * still exits with success. */ -#include <mpi.h> #include "h5test.h" -const char *FILENAME[] = { - "flush", - "noflush", - NULL -}; +const char *FILENAME[] = {"flush", "noflush", NULL}; -static double the_data[100][100]; +static int *data_g = NULL; + +#define N_GROUPS 100 /*------------------------------------------------------------------------- - * Function: create_file - * - * Purpose: Creates file used in part 1 of the test + * Function: create_test_file * - * Return: Success: 0 + * Purpose: Creates the file used in part 1 of the test * - * Failure: 1 + * Return: Success: A valid file ID + * Failure: H5I_INVALID_HID * - * Programmer: Leon Arber + * Programmer: Leon Arber * Sept. 
26, 2006 * - * Modifications: - * *------------------------------------------------------------------------- */ static hid_t -create_file(char* name, hid_t fapl) +create_test_file(char *name, size_t name_length, hid_t fapl_id) { - hid_t file, dcpl, space, dset, groups, grp, plist; - hsize_t ds_size[2] = {100, 100}; - hsize_t ch_size[2] = {5, 5}; - hsize_t i, j; - - - - if((file=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) goto error; + hid_t fid = H5I_INVALID_HID; + hid_t dcpl_id = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t top_level_gid = H5I_INVALID_HID; + hid_t gid = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hsize_t dims[2] = {100, 100}; + hsize_t chunk_dims[2] = {5, 5}; + hsize_t i, j; + + if ((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) + goto error; /* Create a chunked dataset */ - if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error; - if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error; - if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error; - if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) - goto error; - - plist = H5Pcreate(H5P_DATASET_XFER); - H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); - + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) + goto error; + if (H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0) + goto error; + if ((sid = H5Screate_simple(2, dims, NULL)) < 0) + goto error; + if ((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + + if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + goto error; + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) + goto error; /* Write some data */ - for(i = 0; i < ds_size[0]; i++) { - /* - * The extra cast in the following statement is a bug workaround - * for the Win32 version 5.0 compiler. - * 1998-11-06 ptl - */ - for(j = 0; j < ds_size[1]; j++) - the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1); - } - if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, plist, the_data) < 0) goto error; + for (i = 0; i < dims[0]; i++) + for (j = 0; j < dims[1]; j++) + data_g[(i * 100) + j] = (int)(i + (i * j) + j); + + if (H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0) + goto error; /* Create some groups */ - if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error; - for(i = 0; i < 100; i++) { - sprintf(name, "grp%02u", (unsigned)i); - if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error; - if(H5Gclose(grp) < 0) goto error; + if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + for (i = 0; i < N_GROUPS; i++) { + HDsnprintf(name, name_length, "grp%02u", (unsigned)i); + if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) + goto error; + if (H5Gclose(gid) < 0) + goto error; } - return file; + return fid; error: - HD_exit(1); -} + return H5I_INVALID_HID; +} /* end create_test_file() */ /*------------------------------------------------------------------------- - * Function: main + * Function: main * - * Purpose: Part 1 of a two-part H5Fflush() test. + * Purpose: Part 1 of a two-part parallel H5Fflush() test. * - * Return: Success: 0 + * Return: EXIT_FAILURE (always) * - * Failure: 1 - * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Friday, October 23, 1998 * - * Modifications: - * Leon Arber - * Sept. 
26, 2006, expand test to check for failure if H5Fflush is not called. - * - * *------------------------------------------------------------------------- */ int -main(int argc, char* argv[]) +main(int argc, char *argv[]) { - hid_t file1, file2, fapl; - MPI_File *mpifh_p = NULL; - char name[1024]; - const char *envval = NULL; - int mpi_size, mpi_rank; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; + hid_t fid1 = H5I_INVALID_HID; + hid_t fid2 = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + MPI_File *mpifh_p = NULL; + char name[1024]; + const char *envval = NULL; + int mpi_size; + int mpi_rank; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; MPI_Init(&argc, &argv); MPI_Comm_size(comm, &mpi_size); MPI_Comm_rank(comm, &mpi_rank); - fapl = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(fapl, comm, info); + if (mpi_rank == 0) + TESTING("H5Fflush (part1)"); - if(mpi_rank == 0) - TESTING("H5Fflush (part1)"); - envval = HDgetenv("HDF5_DRIVER"); - if(envval == NULL) + /* Don't run using the split VFD */ + envval = HDgetenv(HDF5_DRIVER); + if (envval == NULL) envval = "nomatch"; - if(HDstrcmp(envval, "split")) { - /* Create the file */ - h5_fixname(FILENAME[0], fapl, name, sizeof name); - file1 = create_file(name, fapl); - /* Flush and exit without closing the library */ - if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error; - - /* Create the other file which will not be flushed */ - h5_fixname(FILENAME[1], fapl, name, sizeof name); - file2 = create_file(name, fapl); - - - if(mpi_rank == 0) - PASSED(); - fflush(stdout); - fflush(stderr); - } /* end if */ - else { - SKIPPED(); - puts(" Test not compatible with current Virtual File Driver"); - } /* end else */ - - /* - * Some systems like AIX do not like files not closed when MPI_Finalize + + if (!HDstrcmp(envval, "split")) { + if (mpi_rank == 0) { + SKIPPED(); + HDputs(" Test not compatible with current Virtual File Driver"); + } + MPI_Finalize(); + HDexit(EXIT_FAILURE); + } + + if (NULL == (data_g = HDmalloc(100 * 100 * sizeof(*data_g)))) + goto error; + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + goto error; + if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) + goto error; + + /* Create the file */ + h5_fixname(FILENAME[0], fapl_id, name, sizeof(name)); + if ((fid1 = create_test_file(name, sizeof(name), fapl_id)) < 0) + goto error; + /* Flush and exit without closing the library */ + if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0) + goto error; + + /* Create the other file which will not be flushed */ + h5_fixname(FILENAME[1], fapl_id, name, sizeof(name)); + if ((fid2 = create_test_file(name, sizeof(name), fapl_id)) < 0) + goto error; + + if (mpi_rank == 0) + PASSED(); + + HDfflush(stdout); + HDfflush(stderr); + + /* Some systems like AIX do not like files not being closed when MPI_Finalize * is called. So, we need to get the MPI file handles, close them by hand. * Then the _exit is still needed to stop at_exit from happening in some systems. * Note that MPIO VFD returns the address of the file-handle in the VFD struct * because MPI_File_close wants to modify the file-handle variable. 
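The comment above captures the whole trick of part 1: flush one file, leave the other dirty, close the raw MPI file handles by hand, and then leave the process without H5Fclose so no cache writeback can happen. Reduced to its essentials (the test's own close sequence follows below; identifiers as in the test, error handling abbreviated):

    MPI_File *mpifh_p = NULL;

    /* Flush file 1; file 2 is deliberately left unflushed. */
    if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
        goto error;

    /* The MPIO VFD returns a pointer to its MPI_File member, which
     * MPI_File_close needs so it can reset the handle. */
    if (H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
        goto error;
    if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
        goto error;

    /* _exit() (HD_exit in the test) skips atexit processing and HDF5's
     * cache flushing, the closest portable stand-in for a crash. */
    _exit(EXIT_FAILURE);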
*/ - /* close file1 */ - if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) { - printf("H5Fget_vfd_handle for file1 failed\n"); - goto error; - } /* end if */ - if(MPI_File_close(mpifh_p) != MPI_SUCCESS) { - printf("MPI_File_close for file1 failed\n"); - goto error; - } /* end if */ - /* close file2 */ - if(H5Fget_vfd_handle(file2, fapl, (void **)&mpifh_p) < 0) { - printf("H5Fget_vfd_handle for file2 failed\n"); - goto error; - } /* end if */ - if(MPI_File_close(mpifh_p) != MPI_SUCCESS) { - printf("MPI_File_close for file2 failed\n"); - goto error; - } /* end if */ - - fflush(stdout); - fflush(stderr); - HD_exit(0); + /* Close file 1 */ + if (H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0) + goto error; + if (MPI_File_close(mpifh_p) != MPI_SUCCESS) + goto error; + + /* Close file 2 */ + if (H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0) + goto error; + if (MPI_File_close(mpifh_p) != MPI_SUCCESS) + goto error; + + HDfflush(stdout); + HDfflush(stderr); + + if (data_g) { + HDfree(data_g); + data_g = NULL; + } + + /* Always exit with a failure code! + * + * In accordance with the standard, not having all processes + * call MPI_Finalize() can be considered an error, so mpiexec + * et al. may indicate failure on return. It's much easier to + * always ignore the failure condition than to handle some + * platforms returning success and others failure. + */ + HD_exit(EXIT_FAILURE); error: - fflush(stdout); - fflush(stderr); - HD_exit(1); -} + HDfflush(stdout); + HDfflush(stderr); + HDprintf("*** ERROR ***\n"); + HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n"); + HDfflush(stdout); + + if (data_g) + HDfree(data_g); + HD_exit(EXIT_FAILURE); +} /* end main() */ diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c index 03f7c82..3e42351 100644 --- a/testpar/t_pflush2.c +++ b/testpar/t_pflush2.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -26,193 +23,211 @@ #include "h5test.h" -const char *FILENAME[] = { - "flush", - "noflush", - NULL -}; +const char *FILENAME[] = {"flush", "noflush", NULL}; -static double the_data[100][100]; +static int *data_g = NULL; +#define N_GROUPS 100 /*------------------------------------------------------------------------- - * Function: check_file + * Function: check_test_file * - * Purpose: Part 2 of a two-part H5Fflush() test. + * Purpose: Part 2 of a two-part H5Fflush() test. 
* - * Return: Success: 0 + * Return: SUCCEED/FAIL * - * Failure: 1 - * - * Programmer: Leon Arber + * Programmer: Leon Arber * Sept. 26, 2006. * *------------------------------------------------------------------------- */ -static int -check_file(char* name, hid_t fapl) +static herr_t +check_test_file(char *name, size_t name_length, hid_t fapl_id) { - hid_t file, space, dset, groups, grp, plist; - hsize_t ds_size[2]; - double error; - hsize_t i, j; - - plist = H5Pcreate(H5P_DATASET_XFER); - H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE); - if((file = H5Fopen(name, H5F_ACC_RDONLY, fapl)) < 0) goto error; + hid_t fid = H5I_INVALID_HID; + hid_t sid = H5I_INVALID_HID; + hid_t did = H5I_INVALID_HID; + hid_t top_level_gid = H5I_INVALID_HID; + hid_t gid = H5I_INVALID_HID; + hid_t dxpl_id = H5I_INVALID_HID; + hsize_t dims[2]; + int val; + hsize_t i, j; + + if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + goto error; + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) + goto error; + if ((fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id)) < 0) + goto error; /* Open the dataset */ - if((dset = H5Dopen2(file, "dset", H5P_DEFAULT)) < 0) goto error; - if((space = H5Dget_space(dset)) < 0) goto error; - if(H5Sget_simple_extent_dims(space, ds_size, NULL) < 0) goto error; - assert(100==ds_size[0] && 100==ds_size[1]); + if ((did = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0) + goto error; + if ((sid = H5Dget_space(did)) < 0) + goto error; + if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0) + goto error; + HDassert(100 == dims[0] && 100 == dims[1]); /* Read some data */ - if (H5Dread(dset, H5T_NATIVE_DOUBLE, space, space, plist, - the_data) < 0) goto error; - for (i=0; i<ds_size[0]; i++) { - for (j=0; j<ds_size[1]; j++) { - /* - * The extra cast in the following statement is a bug workaround - * for the Win32 version 5.0 compiler. 
- * 1998-11-06 ptl - */ - error = fabs(the_data[i][j]-(double)(hssize_t)i/((hssize_t)j+1)); - if (error>0.0001) { - H5_FAILED(); - printf(" dset[%lu][%lu] = %g\n", - (unsigned long)i, (unsigned long)j, the_data[i][j]); - printf(" should be %g\n", - (double)(hssize_t)i/(hssize_t)(j+1)); - goto error; - } - } + if (H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0) + goto error; + for (i = 0; i < dims[0]; i++) { + for (j = 0; j < dims[1]; j++) { + val = (int)(i + (i * j) + j); + if (data_g[(i * 100) + j] != val) { + H5_FAILED(); + HDprintf(" data_g[%lu][%lu] = %d\n", (unsigned long)i, (unsigned long)j, + data_g[(i * 100) + j]); + HDprintf(" should be %d\n", val); + } + } } /* Open some groups */ - if((groups = H5Gopen2(file, "some_groups", H5P_DEFAULT)) < 0) goto error; - for(i = 0; i < 100; i++) { - sprintf(name, "grp%02u", (unsigned)i); - if((grp = H5Gopen2(groups, name, H5P_DEFAULT)) < 0) goto error; - if(H5Gclose(grp) < 0) goto error; + if ((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0) + goto error; + for (i = 0; i < N_GROUPS; i++) { + HDsnprintf(name, name_length, "grp%02u", (unsigned)i); + if ((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0) + goto error; + if (H5Gclose(gid) < 0) + goto error; } - if(H5Gclose(groups) < 0) goto error; - if(H5Dclose(dset) < 0) goto error; - if(H5Fclose(file) < 0) goto error; - if(H5Pclose(plist) < 0) goto error; - if(H5Sclose(space) < 0) goto error; + if (H5Gclose(top_level_gid) < 0) + goto error; + if (H5Dclose(did) < 0) + goto error; + if (H5Fclose(fid) < 0) + goto error; + if (H5Pclose(dxpl_id) < 0) + goto error; + if (H5Sclose(sid) < 0) + goto error; - return 0; + return SUCCEED; error: - H5E_BEGIN_TRY { - H5Pclose(plist); - H5Gclose(groups); - H5Dclose(dset); - H5Fclose(file); - H5Sclose(space); - } H5E_END_TRY; - return 1; -} + H5E_BEGIN_TRY + { + H5Pclose(dxpl_id); + H5Gclose(top_level_gid); + H5Dclose(did); + H5Fclose(fid); + H5Sclose(sid); + H5Gclose(gid); + } + H5E_END_TRY; + return FAIL; +} /* end check_test_file() */ /*------------------------------------------------------------------------- - * Function: main - * - * Purpose: Part 2 of a two-part H5Fflush() test. + * Function: main * - * Return: Success: 0 + * Purpose: Part 2 of a two-part H5Fflush() test. * - * Failure: 1 + * Return: EXIT_SUCCESS/EXIT_FAIL * - * Programmer: Robb Matzke + * Programmer: Robb Matzke * Friday, October 23, 1998 * - * Modifications: - * Leon Arber - * Sept. 26, 2006, expand to check for case where the was file not flushed. 
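check_test_file() above needs no reference file: because the writer filled the dataset from the closed formula i + i*j + j, the reader can recompute every expected element after a collective read. The core of that verification, stripped of the test scaffolding (identifiers as in the test; the error counter is illustrative):

    hid_t dxpl_id = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);

    if (H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
        goto error;

    /* Recompute the writer's formula for every element. */
    for (i = 0; i < dims[0]; i++)
        for (j = 0; j < dims[1]; j++)
            if (data_g[(i * 100) + j] != (int)(i + (i * j) + j))
                nerrs++;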
- * *------------------------------------------------------------------------- */ int -main(int argc, char* argv[]) +main(int argc, char *argv[]) { + hid_t fapl_id1 = H5I_INVALID_HID; + hid_t fapl_id2 = H5I_INVALID_HID; H5E_auto2_t func; - char name[1024]; + char name[1024]; const char *envval = NULL; - int mpi_size, mpi_rank; - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; + int mpi_size; + int mpi_rank; + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; MPI_Init(&argc, &argv); MPI_Comm_size(comm, &mpi_size); MPI_Comm_rank(comm, &mpi_rank); - if(mpi_rank == 0) - TESTING("H5Fflush (part2 with flush)"); + if (mpi_rank == 0) + TESTING("H5Fflush (part2 with flush)"); - /* Don't run this test using the core or split file drivers */ - envval = HDgetenv("HDF5_DRIVER"); + /* Don't run using the split VFD */ + envval = HDgetenv(HDF5_DRIVER); if (envval == NULL) envval = "nomatch"; - if (HDstrcmp(envval, "core") && HDstrcmp(envval, "split")) { - hid_t fapl1, fapl2; - - fapl1 = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(fapl1, comm, info); - - fapl2 = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(fapl2, comm, info); - - /* Check the case where the file was flushed */ - h5_fixname(FILENAME[0], fapl1, name, sizeof name); - if(check_file(name, fapl1)) - { - H5_FAILED() - goto error; - } - else if(mpi_rank == 0) - { - PASSED() - } - - /* Check the case where the file was not flushed. This should give an error - * so we turn off the error stack temporarily */ - if(mpi_rank == 0) - TESTING("H5Fflush (part2 without flush)"); - H5Eget_auto2(H5E_DEFAULT,&func,NULL); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - h5_fixname(FILENAME[1], fapl2, name, sizeof name); - if(check_file(name, fapl2)) - { - if(mpi_rank == 0) - { - PASSED() - } - } - else - { - H5_FAILED() - goto error; - } - H5Eset_auto2(H5E_DEFAULT, func, NULL); - - - h5_cleanup(&FILENAME[0], fapl1); - h5_cleanup(&FILENAME[1], fapl2); + + if (!HDstrcmp(envval, "split")) { + if (mpi_rank == 0) { + SKIPPED(); + HDputs(" Test not compatible with current Virtual File Driver"); + } + MPI_Finalize(); + HDexit(EXIT_SUCCESS); } - else - { - SKIPPED(); - puts(" Test not compatible with current Virtual File Driver"); + + if (NULL == (data_g = HDmalloc(100 * 100 * sizeof(*data_g)))) + goto error; + + if ((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0) + goto error; + if (H5Pset_fapl_mpio(fapl_id1, comm, info) < 0) + goto error; + + if ((fapl_id2 = H5Pcreate(H5P_FILE_ACCESS)) < 0) + goto error; + if (H5Pset_fapl_mpio(fapl_id2, comm, info) < 0) + goto error; + + /* Check the case where the file was flushed */ + h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name)); + if (check_test_file(name, sizeof(name), fapl_id1)) { + H5_FAILED(); + goto error; + } + else if (mpi_rank == 0) { + PASSED(); + } + + /* Check the case where the file was not flushed. This should give an error + * so we turn off the error stack temporarily. 
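Where t_oflush.c silenced single calls with H5E_BEGIN_TRY, this part of the test has to keep the error stack quiet across an entire function call, so it saves the installed handler, disables it, and restores it afterwards, as the calls just below do. The pattern in isolation; unlike the test, this sketch also preserves the handler's client data:

    H5E_auto2_t saved_func;
    void       *saved_data;

    /* Save the current error handler, then install none. */
    H5Eget_auto2(H5E_DEFAULT, &saved_func, &saved_data);
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

    /* ... calls whose failures are expected and should stay quiet ... */

    /* Put the original handler back. */
    H5Eset_auto2(H5E_DEFAULT, saved_func, saved_data);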
+ */ + if (mpi_rank == 0) + TESTING("H5Fflush (part2 without flush)"); + H5Eget_auto2(H5E_DEFAULT, &func, NULL); + H5Eset_auto2(H5E_DEFAULT, NULL, NULL); + + h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name)); + if (check_test_file(name, sizeof(name), fapl_id2)) { + if (mpi_rank == 0) + PASSED(); + } + else { + H5_FAILED(); + goto error; + } + + H5Eset_auto2(H5E_DEFAULT, func, NULL); + + h5_clean_files(&FILENAME[0], fapl_id1); + h5_clean_files(&FILENAME[1], fapl_id2); + + if (data_g) { + HDfree(data_g); + data_g = NULL; } MPI_Finalize(); - return 0; - error: - return 1; -} + HDexit(EXIT_SUCCESS); + +error: + if (data_g) + HDfree(data_g); + HDexit(EXIT_FAILURE); +} /* end main() */ diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c index 76eeaef..cef5d12 100644 --- a/testpar/t_ph5basic.c +++ b/testpar/t_ph5basic.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -19,70 +16,67 @@ #include "testphdf5.h" - /*------------------------------------------------------------------------- * Function: test_fapl_mpio_dup * * Purpose: Test if fapl_mpio property list keeps a duplicate of the - * communicator and INFO objects given when set; and returns - * duplicates of its components when H5Pget_fapl_mpio is called. - * - * Return: Success: None + * communicator and INFO objects given when set; and returns + * duplicates of its components when H5Pget_fapl_mpio is called. 
* - * Failure: Abort + * Return: Success: None + * Failure: Abort * * Programmer: Albert Cheng * January 9, 2003 * - * Modifications: *------------------------------------------------------------------------- */ void test_fapl_mpio_dup(void) { - int mpi_size, mpi_rank; + int mpi_size, mpi_rank; MPI_Comm comm, comm_tmp; - int mpi_size_old, mpi_rank_old; - int mpi_size_tmp, mpi_rank_tmp; - MPI_Info info = MPI_INFO_NULL; + int mpi_size_old, mpi_rank_old; + int mpi_size_tmp, mpi_rank_tmp; + MPI_Info info = MPI_INFO_NULL; MPI_Info info_tmp = MPI_INFO_NULL; - int mrc; /* MPI return value */ - hid_t acc_pl; /* File access properties */ - herr_t ret; /* hdf5 return value */ - int nkeys, nkeys_tmp; + int mrc; /* MPI return value */ + hid_t acc_pl; /* File access properties */ + herr_t ret; /* HDF5 return value */ + int nkeys, nkeys_tmp; if (VERBOSE_MED) - printf("Verify fapl_mpio duplicates communicator and INFO objects\n"); + HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n"); /* set up MPI parameters */ - MPI_Comm_size(MPI_COMM_WORLD,&mpi_size); - MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); if (VERBOSE_MED) - printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); + HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size); /* Create a new communicator that has the same processes as MPI_COMM_WORLD. - * Use MPI_Comm_split because it is simplier than MPI_Comm_create + * Use MPI_Comm_split because it is simpler than MPI_Comm_create */ mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split"); - MPI_Comm_size(comm,&mpi_size_old); - MPI_Comm_rank(comm,&mpi_rank_old); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split"); + MPI_Comm_size(comm, &mpi_size_old); + MPI_Comm_rank(comm, &mpi_rank_old); if (VERBOSE_MED) - printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); + HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old); /* create a new INFO object with some trivial information. */ mrc = MPI_Info_create(&info); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_create"); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_create"); mrc = MPI_Info_set(info, "hdf_info_name", "XYZ"); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_set"); - if (MPI_INFO_NULL != info){ - mrc=MPI_Info_get_nkeys(info, &nkeys); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys"); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_set"); + if (MPI_INFO_NULL != info) { + mrc = MPI_Info_get_nkeys(info, &nkeys); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); } if (VERBOSE_MED) - h5_dump_info_object(info); + h5_dump_info_object(info); - acc_pl = H5Pcreate (H5P_FILE_ACCESS); + acc_pl = H5Pcreate(H5P_FILE_ACCESS); VRFY((acc_pl >= 0), "H5P_FILE_ACCESS"); ret = H5Pset_fapl_mpio(acc_pl, comm, info); @@ -94,28 +88,27 @@ test_fapl_mpio_dup(void) * valid communicator and INFO object. 
*/ mrc = MPI_Comm_free(&comm); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL!=info){ - mrc = MPI_Info_free(&info); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_free"); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info) { + mrc = MPI_Info_free(&info); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); VRFY((ret >= 0), "H5Pget_fapl_mpio"); - MPI_Comm_size(comm_tmp,&mpi_size_tmp); - MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); if (VERBOSE_MED) - printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", - mpi_rank_tmp, mpi_size_tmp); - VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size"); - VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank"); - if (MPI_INFO_NULL != info_tmp){ - mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys"); - VRFY((nkeys_tmp==nkeys), "new and old nkeys equal"); + HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); + VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); } if (VERBOSE_MED) - h5_dump_info_object(info_tmp); + h5_dump_info_object(info_tmp); /* Case 2: * Free the retrieved communicator and INFO object. @@ -124,70 +117,67 @@ test_fapl_mpio_dup(void) * Also verify the NULL argument option. */ mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL!=info_tmp){ - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_free"); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } /* check NULL argument options. */ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL); VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only"); mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp); VRFY((ret >= 0), "H5Pget_fapl_mpio Info only"); - if (MPI_INFO_NULL!=info_tmp){ - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_free"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL); VRFY((ret >= 0), "H5Pget_fapl_mpio neither"); /* now get both and check validity too. */ - /* Donot free the returned objects which are used in the next case. */ + /* Do not free the returned objects which are used in the next case. 
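All three cases in test_fapl_mpio_dup() hang on one ownership rule: H5Pset_fapl_mpio stores duplicates of the communicator and INFO object, and H5Pget_fapl_mpio returns fresh duplicates that the caller owns. The rule in outline, as a sketch with error checking omitted:

    hid_t    fapl = H5Pcreate(H5P_FILE_ACCESS);
    MPI_Comm comm_out;
    MPI_Info info_out;

    H5Pset_fapl_mpio(fapl, comm, info); /* fapl keeps its own duplicates */
    MPI_Comm_free(&comm);               /* safe: the fapl copies stay valid */
    if (MPI_INFO_NULL != info)
        MPI_Info_free(&info);

    H5Pget_fapl_mpio(fapl, &comm_out, &info_out); /* returns new duplicates */
    H5Pclose(fapl); /* comm_out and info_out remain valid afterwards */

    MPI_Comm_free(&comm_out);
    if (MPI_INFO_NULL != info_out)
        MPI_Info_free(&info_out);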
*/ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp); VRFY((ret >= 0), "H5Pget_fapl_mpio"); - MPI_Comm_size(comm_tmp,&mpi_size_tmp); - MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); if (VERBOSE_MED) - printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", - mpi_rank_tmp, mpi_size_tmp); - VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size"); - VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank"); - if (MPI_INFO_NULL != info_tmp){ - mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys"); - VRFY((nkeys_tmp==nkeys), "new and old nkeys equal"); + HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size"); + VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); + VRFY((nkeys_tmp == nkeys), "new and old nkeys equal"); } if (VERBOSE_MED) - h5_dump_info_object(info_tmp); + h5_dump_info_object(info_tmp); /* Case 3: * Close the property list and verify the retrieved communicator and INFO * object are still valid. */ H5Pclose(acc_pl); - MPI_Comm_size(comm_tmp,&mpi_size_tmp); - MPI_Comm_rank(comm_tmp,&mpi_rank_tmp); + MPI_Comm_size(comm_tmp, &mpi_size_tmp); + MPI_Comm_rank(comm_tmp, &mpi_rank_tmp); if (VERBOSE_MED) - printf("After Property list closed: rank/size of comm are %d/%d\n", - mpi_rank_tmp, mpi_size_tmp); - if (MPI_INFO_NULL != info_tmp){ - mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys"); + HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys"); } if (VERBOSE_MED) - h5_dump_info_object(info_tmp); + h5_dump_info_object(info_tmp); /* clean up */ mrc = MPI_Comm_free(&comm_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free"); - if (MPI_INFO_NULL!=info_tmp){ - mrc = MPI_Info_free(&info_tmp); - VRFY((mrc==MPI_SUCCESS), "MPI_Info_free"); + VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free"); + if (MPI_INFO_NULL != info_tmp) { + mrc = MPI_Info_free(&info_tmp); + VRFY((mrc == MPI_SUCCESS), "MPI_Info_free"); } -} - +} /* end test_fapl_mpio_dup() */ diff --git a/testpar/t_pmulti_dset.c b/testpar/t_pmulti_dset.c new file mode 100644 index 0000000..52d0aa7 --- /dev/null +++ b/testpar/t_pmulti_dset.c @@ -0,0 +1,764 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Programmer: Neil Fortner + * March 10, 2014 + * + * Purpose: Test H5Dwrite_multi() and H5Dread_multi using randomized + * parameters in parallel. Also tests H5Dwrite() and H5Dread() + * using a similar method. 
+ * + * Note that this test currently relies on all processes generating + * the same sequence of random numbers after using a shared seed + * value, therefore it may not work across multiple machines. + */ + +#include "h5test.h" +#include "testpar.h" + +#define T_PMD_ERROR \ + do { \ + nerrors++; \ + H5_FAILED(); \ + AT(); \ + printf("seed = %u\n", seed); \ + } while (0) + +#define FILENAME "pmulti_dset.h5" +#define MAX_DSETS 5 +#define MAX_DSET_X 15 +#define MAX_DSET_Y 10 +#define MAX_CHUNK_X 8 +#define MAX_CHUNK_Y 6 +#define MAX_HS_X 4 +#define MAX_HS_Y 2 +#define MAX_HS 2 +#define MAX_POINTS 6 +#define MAX_SEL_RETRIES 10 +#define OPS_PER_FILE 25 +#define DSET_MAX_NAME_LEN 8 + +/* Option flags */ +#define MDSET_FLAG_CHUNK 0x01u +#define MDSET_FLAG_MLAYOUT 0x02u +#define MDSET_FLAG_SHAPESAME 0x04u +#define MDSET_FLAG_MDSET 0x08u +#define MDSET_FLAG_COLLECTIVE 0x10u +#define MDSET_FLAG_COLLECTIVE_OPT 0x20u +#define MDSET_FLAG_TCONV 0x40u +#define MDSET_FLAG_FILTER 0x80u +#define MDSET_ALL_FLAGS \ + (MDSET_FLAG_CHUNK | MDSET_FLAG_MLAYOUT | MDSET_FLAG_SHAPESAME | MDSET_FLAG_MDSET | \ + MDSET_FLAG_COLLECTIVE | MDSET_FLAG_COLLECTIVE_OPT | MDSET_FLAG_TCONV | MDSET_FLAG_FILTER) + +/* MPI variables */ +int mpi_size; +int mpi_rank; + +/* Names for datasets */ +char dset_name[MAX_DSETS][DSET_MAX_NAME_LEN]; + +/* Random number seed */ +unsigned seed; + +/* Number of errors */ +int nerrors = 0; + +/* Whether these filters are available */ +htri_t deflate_avail = FALSE; +htri_t fletcher32_avail = FALSE; + +/*------------------------------------------------------------------------- + * Function: test_pmdset + * + * Purpose: Test randomized I/O using one or more datasets. Creates a + * file, runs OPS_PER_FILE read or write operations verifying + * that reads return the expected data, then closes the file. + * Runs the test with a new file niter times. + * + * The operations can use either hyperslab or point + * selections. Options are available for chunked or + * contiguous layout, use of multiple datasets and H5D*_multi + * calls, and use of the "shapesame" algorithm code path. To + * avoid the shapesame path when that option is not set, this + * function simply adds a dimension to the memory buffer in a + * way that the shapesame code is not designed to handle. 
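The shared-seed requirement stated in the file header is met further down in main(); the essential steps are that rank 0 picks a seed, broadcasts it, and every rank seeds the same generator, after which unguarded HDrandom() calls stay in lockstep across processes. A condensed sketch using the plain libc names (the test itself uses the HD-prefixed wrappers):

    #include <stdlib.h>
    #include <time.h>

    unsigned seed = 0;

    if (mpi_rank == 0)
        seed = (unsigned)time(NULL);

    /* Every rank must use the same seed or their selections diverge. */
    MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
    srandom(seed);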
+ * + * Return: Number of errors + * + * Programmer: Neil Fortner + * Monday, March 10, 2014 + * + *------------------------------------------------------------------------- + */ +static void +test_pmdset(size_t niter, unsigned flags) +{ + hid_t dset_ids[MAX_DSETS]; + hid_t mem_type_ids[MAX_DSETS]; + hid_t mem_space_ids[MAX_DSETS]; + hid_t file_space_ids[MAX_DSETS]; + void *rbufs[MAX_DSETS]; + const void *wbufs[MAX_DSETS]; + size_t max_dsets; + size_t buf_size; + size_t ndsets; + hid_t file_id = -1; + hid_t fapl_id = -1; + hid_t dcpl_id[MAX_DSETS]; + hid_t dxpl_id = -1; + hsize_t dset_dims[MAX_DSETS][3]; + hsize_t chunk_dims[2]; + hsize_t max_dims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; + unsigned *rbuf = NULL; + unsigned *rbufi[MAX_DSETS][MAX_DSET_X]; + unsigned *erbuf = NULL; + unsigned *erbufi[MAX_DSETS][MAX_DSET_X]; + unsigned *wbuf = NULL; + unsigned *wbufi[MAX_DSETS][MAX_DSET_X]; + unsigned *efbuf = NULL; + unsigned *efbufi[MAX_DSETS][MAX_DSET_X]; + unsigned char *dset_usage; + unsigned char *dset_usagei[MAX_DSETS][MAX_DSET_X]; + hbool_t do_read; + hbool_t last_read; + hbool_t overlap; + hsize_t start[MAX_HS][3]; + hsize_t count[MAX_HS][3]; + hsize_t points[3 * MAX_POINTS]; + int rank_data_diff; + unsigned op_data_incr; + size_t i, j, k, l, m, n, o, p; + + if (mpi_rank == 0) + TESTING("random I/O"); + + /* Skipped configurations */ + if (!(flags & MDSET_FLAG_COLLECTIVE_OPT)) { + if (mpi_rank == 0) + SKIPPED(); + return; + } + + /* Calculate maximum number of datasets */ + max_dsets = (flags & MDSET_FLAG_MDSET) ? MAX_DSETS : 1; + + /* Calculate data increment per write operation */ + op_data_incr = (unsigned)max_dsets * MAX_DSET_X * MAX_DSET_Y * (unsigned)mpi_size; + + /* Calculate buffer size */ + buf_size = max_dsets * MAX_DSET_X * MAX_DSET_Y * sizeof(unsigned); + + /* Initialize dcpl_id array */ + for (i = 0; i < max_dsets; i++) + dcpl_id[i] = -1; + + /* Allocate buffers */ + if (NULL == (rbuf = (unsigned *)HDmalloc(buf_size))) + T_PMD_ERROR; + if (NULL == (erbuf = (unsigned *)HDmalloc(buf_size))) + T_PMD_ERROR; + if (NULL == (wbuf = (unsigned *)HDmalloc(buf_size))) + T_PMD_ERROR; + if (NULL == (efbuf = (unsigned *)HDmalloc(buf_size))) + T_PMD_ERROR; + if (NULL == (dset_usage = (unsigned char *)HDmalloc(max_dsets * MAX_DSET_X * MAX_DSET_Y))) + T_PMD_ERROR; + + /* Initialize buffer indices */ + for (i = 0; i < max_dsets; i++) + for (j = 0; j < MAX_DSET_X; j++) { + rbufi[i][j] = rbuf + (i * MAX_DSET_X * MAX_DSET_Y) + (j * MAX_DSET_Y); + erbufi[i][j] = erbuf + (i * MAX_DSET_X * MAX_DSET_Y) + (j * MAX_DSET_Y); + wbufi[i][j] = wbuf + (i * MAX_DSET_X * MAX_DSET_Y) + (j * MAX_DSET_Y); + efbufi[i][j] = efbuf + (i * MAX_DSET_X * MAX_DSET_Y) + (j * MAX_DSET_Y); + dset_usagei[i][j] = dset_usage + (i * MAX_DSET_X * MAX_DSET_Y) + (j * MAX_DSET_Y); + } /* end for */ + + /* Initialize 3rd dimension information (for tricking library into using + * non-"shapesame" code */ + for (i = 0; i < max_dsets; i++) + dset_dims[i][2] = 1; + for (i = 0; i < MAX_HS; i++) { + start[i][2] = 0; + count[i][2] = 1; + } /* end for */ + + /* Initialize IDs */ + for (i = 0; i < max_dsets; i++) { + dset_ids[i] = -1; + file_space_ids[i] = -1; + mem_type_ids[i] = H5T_NATIVE_UINT; + mem_space_ids[i] = -1; + } /* end for */ + + /* Generate memory dataspace */ + dset_dims[0][0] = MAX_DSET_X; + dset_dims[0][1] = MAX_DSET_Y; + if ((mem_space_ids[0] = H5Screate_simple((flags & MDSET_FLAG_SHAPESAME) ? 
2 : 3, dset_dims[0], NULL)) < 0) + T_PMD_ERROR; + for (i = 1; i < max_dsets; i++) + if ((mem_space_ids[i] = H5Scopy(mem_space_ids[0])) < 0) + T_PMD_ERROR; + + /* Create fapl */ + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + T_PMD_ERROR; + + /* Set MPIO file driver */ + if ((H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL)) < 0) + T_PMD_ERROR; + + /* Create dcpl 0 */ + if ((dcpl_id[0] = H5Pcreate(H5P_DATASET_CREATE)) < 0) + T_PMD_ERROR; + + /* Set fill time to alloc, and alloc time to early (so we always know + * what's in the file) */ + if (H5Pset_fill_time(dcpl_id[0], H5D_FILL_TIME_ALLOC) < 0) + T_PMD_ERROR; + if (H5Pset_alloc_time(dcpl_id[0], H5D_ALLOC_TIME_EARLY) < 0) + T_PMD_ERROR; + + /* Set filters if requested */ + if (flags & MDSET_FLAG_FILTER) { + if (fletcher32_avail) + if (H5Pset_fletcher32(dcpl_id[0]) < 0) + T_PMD_ERROR; + if (deflate_avail) + if (H5Pset_deflate(dcpl_id[0], 1) < 0) + T_PMD_ERROR; + } + + /* Copy dcpl 0 to other slots in dcpl_id array */ + for (i = 1; i < MAX_DSETS; i++) + if ((dcpl_id[i] = H5Pcopy(dcpl_id[0])) < 0) + T_PMD_ERROR; + + /* If this is a multi layout run, dataset 2 will use filters, set them now */ + if (flags & MDSET_FLAG_MLAYOUT) { + if (fletcher32_avail) + if (H5Pset_fletcher32(dcpl_id[2]) < 0) + T_PMD_ERROR; + if (deflate_avail) + if (H5Pset_deflate(dcpl_id[2], 1) < 0) + T_PMD_ERROR; + } + + /* Create dxpl */ + if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) + T_PMD_ERROR; + + /* Set collective or independent I/O */ + if (flags & MDSET_FLAG_COLLECTIVE) { + if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0) + T_PMD_ERROR; + + /* Set low level I/O mode */ + if (flags & MDSET_FLAG_COLLECTIVE_OPT) { + if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, H5FD_MPIO_COLLECTIVE_IO) < 0) + T_PMD_ERROR; + } + else if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, H5FD_MPIO_INDIVIDUAL_IO) < 0) + T_PMD_ERROR; + } /* end if */ + else if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) < 0) + T_PMD_ERROR; + + for (i = 0; i < niter; i++) { + /* Determine number of datasets */ + ndsets = (flags & MDSET_FLAG_MLAYOUT) ? 3 + : (flags & MDSET_FLAG_MDSET) ? (size_t)((size_t)HDrandom() % max_dsets) + 1 + : 1; + + /* Create file */ + if ((file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) + T_PMD_ERROR; + + /* Create datasets */ + for (j = 0; j < ndsets; j++) { + hbool_t use_chunk = + (flags & MDSET_FLAG_CHUNK) || ((flags & MDSET_FLAG_MLAYOUT) && (j == 1 || j == 2)); + + /* Generate file dataspace */ + dset_dims[j][0] = (hsize_t)((HDrandom() % MAX_DSET_X) + 1); + dset_dims[j][1] = (hsize_t)((HDrandom() % MAX_DSET_Y) + 1); + if ((file_space_ids[j] = H5Screate_simple(2, dset_dims[j], use_chunk ? max_dims : NULL)) < 0) + T_PMD_ERROR; + + /* Generate chunk if called for by configuration (multi layout uses chunked for datasets + * 1 and 2) */ + if (use_chunk) { + chunk_dims[0] = (hsize_t)((HDrandom() % MAX_CHUNK_X) + 1); + chunk_dims[1] = (hsize_t)((HDrandom() % MAX_CHUNK_Y) + 1); + if (H5Pset_chunk(dcpl_id[j], 2, chunk_dims) < 0) + T_PMD_ERROR; + } /* end if */ + + /* Create dataset */ + /* If MDSET_FLAG_TCONV is set, use a different datatype with 50% probability, so + * some datasets require type conversion and others do not */ + if ((dset_ids[j] = H5Dcreate2(file_id, dset_name[j], + (flags & MDSET_FLAG_TCONV && HDrandom() % 2) ? 
H5T_NATIVE_LONG + : H5T_NATIVE_UINT, + file_space_ids[j], H5P_DEFAULT, dcpl_id[j], H5P_DEFAULT)) < 0) + T_PMD_ERROR; + } /* end for */ + + /* Initialize read buffer and expected read buffer */ + (void)HDmemset(rbuf, 0, buf_size); + (void)HDmemset(erbuf, 0, buf_size); + + /* Initialize write buffer */ + for (j = 0; j < max_dsets; j++) + for (k = 0; k < MAX_DSET_X; k++) + for (l = 0; l < MAX_DSET_Y; l++) + wbufi[j][k][l] = (unsigned)(((unsigned)mpi_rank * max_dsets * MAX_DSET_X * MAX_DSET_Y) + + (j * MAX_DSET_X * MAX_DSET_Y) + (k * MAX_DSET_Y) + l); + + /* Initialize expected file buffer */ + (void)HDmemset(efbuf, 0, buf_size); + + /* Set last_read to TRUE so we don't reopen the file on the first + * iteration */ + last_read = TRUE; + + /* Perform read/write operations */ + for (j = 0; j < OPS_PER_FILE; j++) { + /* Decide whether to read or write */ + do_read = (hbool_t)(HDrandom() % 2); + + /* Barrier to ensure processes have finished the previous operation + */ + MPI_Barrier(MPI_COMM_WORLD); + + /* If the last operation was a write we must close and reopen the + * file to ensure consistency */ + /* Possibly change to MPI_FILE_SYNC at some point? -NAF */ + if (!last_read) { + /* Close datasets */ + for (k = 0; k < ndsets; k++) { + if (H5Dclose(dset_ids[k]) < 0) + T_PMD_ERROR; + dset_ids[k] = -1; + } /* end for */ + + /* Close file */ + if (H5Fclose(file_id) < 0) + T_PMD_ERROR; + file_id = -1; + + /* Barrier */ + MPI_Barrier(MPI_COMM_WORLD); + + /* Reopen file */ + if ((file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl_id)) < 0) + T_PMD_ERROR; + + /* Reopen datasets */ + for (k = 0; k < ndsets; k++) { + if ((dset_ids[k] = H5Dopen2(file_id, dset_name[k], H5P_DEFAULT)) < 0) + T_PMD_ERROR; + } /* end for */ + + /* Barrier */ + MPI_Barrier(MPI_COMM_WORLD); + } /* end if */ + + /* Keep track of whether the last operation was a read */ + last_read = do_read; + + /* Loop over datasets */ + for (k = 0; k < ndsets; k++) { + /* Reset selection */ + if (H5Sselect_none(mem_space_ids[k]) < 0) + T_PMD_ERROR; + if (H5Sselect_none(file_space_ids[k]) < 0) + T_PMD_ERROR; + + /* Reset dataset usage array, if writing */ + if (!do_read) + HDmemset(dset_usage, 0, max_dsets * MAX_DSET_X * MAX_DSET_Y); + + /* Iterate over processes */ + for (l = 0; l < (size_t)mpi_size; l++) { + /* Calculate difference between data in process being + * iterated over and that in this process */ + rank_data_diff = + (int)((unsigned)max_dsets * MAX_DSET_X * MAX_DSET_Y) * ((int)l - (int)mpi_rank); + + /* Decide whether to do a hyperslab or point selection */ + if (HDrandom() % 2) { + /* Hyperslab */ + size_t nhs = (size_t)((HDrandom() % MAX_HS) + 1); /* Number of hyperslabs */ + size_t max_hs_x = (MAX_HS_X <= dset_dims[k][0]) + ? MAX_HS_X + : dset_dims[k][0]; /* Determine maximum hyperslab size in X */ + size_t max_hs_y = (MAX_HS_Y <= dset_dims[k][1]) + ? MAX_HS_Y + : dset_dims[k][1]; /* Determine maximum hyperslab size in Y */ + + for (m = 0; m < nhs; m++) { + overlap = TRUE; + for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) { + /* Generate hyperslab */ + count[m][0] = (hsize_t)(((hsize_t)HDrandom() % max_hs_x) + 1); + count[m][1] = (hsize_t)(((hsize_t)HDrandom() % max_hs_y) + 1); + start[m][0] = (count[m][0] == dset_dims[k][0]) + ? 0 + : (hsize_t)HDrandom() % (dset_dims[k][0] - count[m][0] + 1); + start[m][1] = (count[m][1] == dset_dims[k][1]) + ? 
0 + : (hsize_t)HDrandom() % (dset_dims[k][1] - count[m][1] + 1); + + /* If writing, check for overlap with other processes */ + overlap = FALSE; + if (!do_read) + for (o = start[m][0]; (o < (start[m][0] + count[m][0])) && !overlap; o++) + for (p = start[m][1]; (p < (start[m][1] + count[m][1])) && !overlap; + p++) + if (dset_usagei[k][o][p]) + overlap = TRUE; + } /* end for */ + + /* If we did not find a non-overlapping hyperslab + * quit trying to generate new ones */ + if (overlap) { + nhs = m; + break; + } /* end if */ + + /* Select hyperslab if this is the current process + */ + if (l == (size_t)mpi_rank) { + if (H5Sselect_hyperslab(mem_space_ids[k], H5S_SELECT_OR, start[m], NULL, + count[m], NULL) < 0) + T_PMD_ERROR; + if (H5Sselect_hyperslab(file_space_ids[k], H5S_SELECT_OR, start[m], NULL, + count[m], NULL) < 0) + T_PMD_ERROR; + } /* end if */ + + /* Update expected buffers */ + if (do_read) { + if (l == (size_t)mpi_rank) + for (n = start[m][0]; n < (start[m][0] + count[m][0]); n++) + for (o = start[m][1]; o < (start[m][1] + count[m][1]); o++) + erbufi[k][n][o] = efbufi[k][n][o]; + } /* end if */ + else + for (n = start[m][0]; n < (start[m][0] + count[m][0]); n++) + for (o = start[m][1]; o < (start[m][1] + count[m][1]); o++) + efbufi[k][n][o] = (unsigned)((int)wbufi[k][n][o] + rank_data_diff); + } /* end for */ + + /* Update dataset usage array if writing */ + if (!do_read) + for (m = 0; m < nhs; m++) + for (n = start[m][0]; n < (start[m][0] + count[m][0]); n++) + for (o = start[m][1]; o < (start[m][1] + count[m][1]); o++) + dset_usagei[k][n][o] = (unsigned char)1; + } /* end if */ + else { + /* Point selection */ + size_t npoints = + (size_t)(((size_t)HDrandom() % MAX_POINTS) + 1); /* Number of points */ + + /* Reset dataset usage array if reading, since in this case we don't care + * about overlapping selections between processes */ + if (do_read) + HDmemset(dset_usage, 0, max_dsets * MAX_DSET_X * MAX_DSET_Y); + + /* Generate points */ + for (m = 0; m < npoints; m++) { + overlap = TRUE; + for (n = 0; overlap && (n < MAX_SEL_RETRIES); n++) { + /* Generate point */ + points[2 * m] = (unsigned)((hsize_t)HDrandom() % dset_dims[k][0]); + points[(2 * m) + 1] = (unsigned)((hsize_t)HDrandom() % dset_dims[k][1]); + + /* Check for overlap with other processes (write) or this process + * (always) */ + overlap = FALSE; + if (dset_usagei[k][points[2 * m]][points[(2 * m) + 1]]) + overlap = TRUE; + } /* end for */ + + /* If we did not find a non-overlapping point quit + * trying to generate new ones */ + if (overlap) { + npoints = m; + break; + } /* end if */ + + /* Update dataset usage array after each point to prevent the same point + * being selected twice by a single process, since this is not supported + * by MPI */ + dset_usagei[k][points[2 * m]][points[(2 * m) + 1]] = (unsigned char)1; + } /* end for */ + + /* Select points in file if this is the current process + */ + if ((l == (size_t)mpi_rank) && (npoints > 0)) + if (H5Sselect_elements(file_space_ids[k], H5S_SELECT_APPEND, npoints, points) < 0) + T_PMD_ERROR; + + /* Update expected buffers */ + if (do_read) { + if (l == (size_t)mpi_rank) + for (m = 0; m < npoints; m++) + erbufi[k][points[2 * m]][points[(2 * m) + 1]] = + efbufi[k][points[2 * m]][points[(2 * m) + 1]]; + } /* end if */ + else + for (m = 0; m < npoints; m++) + efbufi[k][points[2 * m]][points[(2 * m) + 1]] = + (unsigned)((int)wbufi[k][points[2 * m]][points[(2 * m) + 1]] + + rank_data_diff); + + /* Select points in memory if this is the current + * process */ + if ((l == 
(size_t)mpi_rank) && (npoints > 0)) { + /* Convert to 3D for memory selection, if not using + * "shapesame" */ + if (!(flags & MDSET_FLAG_SHAPESAME)) { + for (m = npoints - 1; m > 0; m--) { + points[(3 * m) + 2] = 0; + points[(3 * m) + 1] = points[(2 * m) + 1]; + points[3 * m] = points[2 * m]; + } /* end for */ + points[2] = 0; + } /* end if */ + + /* Select elements */ + if (H5Sselect_elements(mem_space_ids[k], H5S_SELECT_APPEND, npoints, points) < 0) + T_PMD_ERROR; + } /* end if */ + } /* end else */ + } /* end for */ + } /* end for */ + + /* Perform I/O */ + if (do_read) { + if (flags & MDSET_FLAG_MDSET) { + /* Set buffers */ + for (k = 0; k < ndsets; k++) + rbufs[k] = rbufi[k][0]; + + /* Read datasets */ + if (H5Dread_multi(ndsets, dset_ids, mem_type_ids, mem_space_ids, file_space_ids, dxpl_id, + rbufs) < 0) + T_PMD_ERROR; + } /* end if */ + else + /* Read */ + if (H5Dread(dset_ids[0], mem_type_ids[0], mem_space_ids[0], file_space_ids[0], dxpl_id, + rbuf) < 0) + T_PMD_ERROR; + + /* Verify data */ + if (0 != memcmp(rbuf, erbuf, buf_size)) + T_PMD_ERROR; + } /* end if */ + else { + if (flags & MDSET_FLAG_MDSET) { + /* Set buffers */ + for (k = 0; k < ndsets; k++) + wbufs[k] = wbufi[k][0]; + + /* Write datasets */ + if (H5Dwrite_multi(ndsets, dset_ids, mem_type_ids, mem_space_ids, file_space_ids, dxpl_id, + wbufs) < 0) + T_PMD_ERROR; + } /* end if */ + else + /* Write */ + if (H5Dwrite(dset_ids[0], mem_type_ids[0], mem_space_ids[0], file_space_ids[0], dxpl_id, + wbuf) < 0) + T_PMD_ERROR; + + /* Update wbuf */ + for (l = 0; l < max_dsets; l++) + for (m = 0; m < MAX_DSET_X; m++) + for (n = 0; n < MAX_DSET_Y; n++) + wbufi[l][m][n] += op_data_incr; + } /* end else */ + } /* end for */ + + /* Close */ + for (j = 0; j < ndsets; j++) { + if (H5Dclose(dset_ids[j]) < 0) + T_PMD_ERROR; + dset_ids[j] = -1; + if (H5Sclose(file_space_ids[j]) < 0) + T_PMD_ERROR; + file_space_ids[j] = -1; + } /* end for */ + if (H5Fclose(file_id) < 0) + T_PMD_ERROR; + file_id = -1; + } /* end for */ + + /* Close */ + for (i = 0; i < max_dsets; i++) { + if (H5Sclose(mem_space_ids[i]) < 0) + T_PMD_ERROR; + mem_space_ids[i] = -1; + } /* end for */ + if (H5Pclose(dxpl_id) < 0) + T_PMD_ERROR; + dxpl_id = -1; + for (i = 0; i < MAX_DSETS; i++) { + if (H5Pclose(dcpl_id[i]) < 0) + T_PMD_ERROR; + dcpl_id[i] = -1; + } + if (H5Pclose(fapl_id) < 0) + T_PMD_ERROR; + fapl_id = -1; + free(rbuf); + rbuf = NULL; + free(erbuf); + erbuf = NULL; + free(wbuf); + wbuf = NULL; + free(efbuf); + efbuf = NULL; + free(dset_usage); + dset_usage = NULL; + + if (mpi_rank == 0) + PASSED(); + + return; +} /* end test_mdset() */ + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Runs all tests with all combinations of configuration + * flags. 
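The multi-dataset branches above funnel every dataset through one call. Separated from the selection machinery, H5Dwrite_multi (and H5Dread_multi) takes parallel arrays, one entry per dataset, plus a single transfer property list; the two-dataset identifiers below are illustrative, not from the test:

    hid_t       ids[2]     = {dset_a, dset_b};
    hid_t       mtypes[2]  = {H5T_NATIVE_UINT, H5T_NATIVE_UINT};
    hid_t       mspaces[2] = {mspace_a, mspace_b};
    hid_t       fspaces[2] = {fspace_a, fspace_b};
    const void *bufs[2]    = {buf_a, buf_b};

    if (H5Dwrite_multi(2, ids, mtypes, mspaces, fspaces, dxpl_id, bufs) < 0)
        /* handle the error */;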
+ * + * Return: Success: 0 + * Failure: 1 + * + * Programmer: Neil Fortner + * Monday, March 10, 2014 + * + *------------------------------------------------------------------------- + */ +int +main(int argc, char *argv[]) +{ + unsigned i; + int ret; + + h5_reset(); + + /* Initialize MPI */ + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Generate random number seed, if rank 0 */ + if (MAINPROCESS) + seed = (unsigned)HDtime(NULL); + + /* Broadcast seed from rank 0 (other ranks will receive rank 0's seed) */ + if (MPI_SUCCESS != MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD)) + T_PMD_ERROR; + + /* Seed random number generator with shared seed (so all ranks generate the + * same sequence) */ + HDsrandom(seed); + + /* Fill dset_name array */ + for (i = 0; i < MAX_DSETS; i++) { + if ((ret = snprintf(dset_name[i], DSET_MAX_NAME_LEN, "dset%u", i)) < 0) + T_PMD_ERROR; + if (ret >= DSET_MAX_NAME_LEN) + T_PMD_ERROR; + } /* end for */ + + /* Check if deflate and fletcher32 filters are available */ + if ((deflate_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) < 0) + T_PMD_ERROR; + if ((fletcher32_avail = H5Zfilter_avail(H5Z_FILTER_FLETCHER32)) < 0) + T_PMD_ERROR; + + for (i = 0; i <= MDSET_ALL_FLAGS; i++) { + /* Skip incompatible flag combinations */ + if (((i & MDSET_FLAG_MLAYOUT) && (i & MDSET_FLAG_CHUNK)) || + ((i & MDSET_FLAG_MLAYOUT) && !(i & MDSET_FLAG_MDSET)) || + ((i & MDSET_FLAG_MLAYOUT) && !(i & MDSET_FLAG_COLLECTIVE)) || + ((i & MDSET_FLAG_MLAYOUT) && (i & MDSET_FLAG_TCONV)) || + ((i & MDSET_FLAG_FILTER) && !(i & MDSET_FLAG_CHUNK)) || + ((i & MDSET_FLAG_FILTER) && !(i & MDSET_FLAG_COLLECTIVE)) || + ((i & MDSET_FLAG_FILTER) && (i & MDSET_FLAG_TCONV)) || + (!(i & MDSET_FLAG_COLLECTIVE_OPT) && !(i & MDSET_FLAG_COLLECTIVE))) + continue; + + /* Print flag configuration */ + if (MAINPROCESS) { + puts("\nConfiguration:"); + printf(" Layout: %s\n", (i & MDSET_FLAG_MLAYOUT) ? "Multi" + : (i & MDSET_FLAG_CHUNK) ? "Chunked" + : "Contiguous"); + printf(" Shape same: %s\n", (i & MDSET_FLAG_SHAPESAME) ? "Yes" : "No"); + printf(" I/O type: %s\n", (i & MDSET_FLAG_MDSET) ? "Multi" : "Single"); + printf(" MPI I/O type: %s\n", (i & MDSET_FLAG_COLLECTIVE) ? "Collective" : "Independent"); + if (i & MDSET_FLAG_COLLECTIVE) + printf(" Low level MPI I/O:%s\n", + (i & MDSET_FLAG_COLLECTIVE_OPT) ? "Collective" : "Independent"); + printf(" Type conversion: %s\n", (i & MDSET_FLAG_TCONV) ? "Yes" : "No"); + printf(" Data filter: %s\n", (i & MDSET_FLAG_MLAYOUT) ? "Mixed" + : (i & MDSET_FLAG_FILTER) ? 
"Yes" + : "No"); + } /* end if */ + + test_pmdset(10, i); + } /* end for */ + + /* Barrier to make sure all ranks are done before deleting the file, and + * also to clean up output (make sure PASSED is printed before any of the + * following messages) */ + if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) + T_PMD_ERROR; + + /* Delete file */ + if (mpi_rank == 0) + if (MPI_SUCCESS != MPI_File_delete(FILENAME, MPI_INFO_NULL)) + T_PMD_ERROR; + + /* Gather errors from all processes */ + MPI_Allreduce(&nerrors, &ret, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); + nerrors = ret; + + if (MAINPROCESS) { + printf("===================================\n"); + if (nerrors) + printf("***Parallel multi dataset tests detected %d errors***\n", nerrors); + else + printf("Parallel multi dataset tests finished with no errors\n"); + printf("===================================\n"); + } /* end if */ + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrors) because exit code is limited to 1 byte */ + return (nerrors != 0); +} /* end main() */ diff --git a/testpar/t_pread.c b/testpar/t_pread.c new file mode 100644 index 0000000..9a2493d --- /dev/null +++ b/testpar/t_pread.c @@ -0,0 +1,1219 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Collective file open optimization tests + * + */ + +#include "testpar.h" +#include "H5Dprivate.h" + +/* The collection of files is included below to aid + * an external "cleanup" process if required. + * + * Note that the code below relies on the ordering of this array + * since each set of three is used by the tests either to construct + * or to read and validate. + */ +#define NFILENAME 3 +const char *FILENAMES[NFILENAME + 1] = {"reloc_t_pread_data_file", "reloc_t_pread_group_0_file", + "reloc_t_pread_group_1_file", NULL}; +#define FILENAME_BUF_SIZE 1024 + +#define COUNT 1000 + +#define LIMIT_NPROC 6 + +hbool_t pass = TRUE; +static const char *random_hdf5_text = "Now is the time for all first-time-users of HDF5 to read their \ +manual or go thru the tutorials!\n\ +While you\'re at it, now is also the time to read up on MPI-IO."; + +static const char *hitchhiker_quote = "A common mistake that people make when trying to design something\n\ +completely foolproof is to underestimate the ingenuity of complete\n\ +fools.\n"; + +static int generate_test_file(MPI_Comm comm, int mpi_rank, int group); +static int test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group); + +static char *test_argv0 = NULL; + +/*------------------------------------------------------------------------- + * Function: generate_test_file + * + * Purpose: This function is called to produce an HDF5 data file + * whose superblock is relocated to a power-of-2 boundary. + * + * Since data will be read back and validated, we generate + * data in a predictable manner rather than randomly. 
+ * For now, we simply use the global mpi_rank of the writing + * process as a starting component for the data generation. + * Subsequent writes are increments from the initial start + * value. + * + * In the overall scheme of running the test, we'll call + * this function twice: first as a collection of all MPI + * processes and then a second time with the processes split + * more or less in half. Each sub group will operate + * collectively on their assigned file. This split into + * subgroups validates that parallel groups can successfully + * open and read data independently from the other parallel + * operations taking place. + * + * Return: Success: 0 + * + * Failure: 1 + * + * Programmer: Richard Warren + * 10/1/17 + * + * Modifications: + * + *------------------------------------------------------------------------- + */ +static int +generate_test_file(MPI_Comm comm, int mpi_rank, int group_id) +{ + int header = -1; + const char *fcn_name = "generate_test_file()"; + const char *failure_mssg = NULL; + const char *group_filename = NULL; + char data_filename[FILENAME_BUF_SIZE]; + int file_index = 0; + int group_size; + int group_rank; + int local_failure = 0; + int global_failures = 0; + hsize_t count = COUNT; + hsize_t i; + hsize_t offset; + hsize_t dims[1] = {0}; + hid_t file_id = -1; + hid_t memspace = -1; + hid_t filespace = -1; + hid_t fctmpl = -1; + hid_t fapl_id = -1; + hid_t dxpl_id = -1; + hid_t dset_id = -1; + hid_t dset_id_ch = -1; + hid_t dcpl_id = H5P_DEFAULT; + hsize_t chunk[1]; + float nextValue; + float *data_slice = NULL; + + pass = TRUE; + + HDassert(comm != MPI_COMM_NULL); + + if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) { + pass = FALSE; + failure_mssg = "generate_test_file: MPI_Comm_rank failed.\n"; + } + + if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) { + pass = FALSE; + failure_mssg = "generate_test_file: MPI_Comm_size failed.\n"; + } + + if (mpi_rank == 0) { + + HDfprintf(stdout, "Constructing test files..."); + } + + /* Setup the file names + * The test specific filenames are stored as consecutive + * array entries in the global 'FILENAMES' array above. + * Here, we simply decide on the starting index for + * file construction. The reading portion of the test + * will have a similar setup process... + */ + if (pass) { + if (comm == MPI_COMM_WORLD) { /* Test 1 */ + file_index = 0; + } + else if (group_id == 0) { /* Test 2 group 0 */ + file_index = 1; + } + else { /* Test 2 group 1 */ + file_index = 2; + } + + /* The 'group_filename' is just a temp variable and + * is used to call into the h5_fixname function. No + * need to worry that we reassign it for each file! 
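+     * (Equivalently, as a one-line sketch of the selection above:
+     *     file_index = (comm == MPI_COMM_WORLD) ? 0 : (group_id == 0) ? 1 : 2;
+     * so FILENAMES[0] serves test 1 and FILENAMES[1]/FILENAMES[2]
+     * serve the two test 2 subgroups.)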
+ */ + group_filename = FILENAMES[file_index]; + HDassert(group_filename); + + /* Assign the 'data_filename' */ + if (h5_fixname(group_filename, H5P_DEFAULT, data_filename, sizeof(data_filename)) == NULL) { + pass = FALSE; + failure_mssg = "h5_fixname(0) failed.\n"; + } + } + + /* setup data to write */ + if (pass) { + if ((data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL) { + pass = FALSE; + failure_mssg = "malloc of data_slice failed.\n"; + } + } + + if (pass) { + nextValue = (float)(mpi_rank * COUNT); + + for (i = 0; i < COUNT; i++) { + data_slice[i] = nextValue; + nextValue += 1; + } + } + + /* Initialize a file creation template */ + if (pass) { + if ((fctmpl = H5Pcreate(H5P_FILE_CREATE)) < 0) { + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n"; + } + else if (H5Pset_userblock(fctmpl, 512) != SUCCEED) { + pass = FALSE; + failure_mssg = "H5Pset_userblock(,size) failed.\n"; + } + } + /* setup FAPL */ + if (pass) { + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) { + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n"; + } + } + + if (pass) { + if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) { + pass = FALSE; + failure_mssg = "H5Pset_fapl_mpio() failed\n"; + } + } + + /* create the data file */ + if (pass) { + if ((file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC, fctmpl, fapl_id)) < 0) { + pass = FALSE; + failure_mssg = "H5Fcreate() failed.\n"; + } + } + + /* create and write the dataset */ + if (pass) { + if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) { + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n"; + } + } + + if (pass) { + if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) { + pass = FALSE; + failure_mssg = "H5Pset_dxpl_mpio() failed.\n"; + } + } + + if (pass) { + dims[0] = COUNT; + if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) { + pass = FALSE; + failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n"; + } + } + + if (pass) { + dims[0] *= (hsize_t)group_size; + if ((filespace = H5Screate_simple(1, dims, NULL)) < 0) { + pass = FALSE; + failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n"; + } + } + + if (pass) { + offset = (hsize_t)group_rank * (hsize_t)COUNT; + if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) { + pass = FALSE; + failure_mssg = "H5Sselect_hyperslab() failed.\n"; + } + } + + if (pass) { + if ((dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, H5P_DEFAULT, + H5P_DEFAULT)) < 0) { + pass = FALSE; + failure_mssg = "H5Dcreate2() failed.\n"; + } + } + + if (pass) { + if ((H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) { + pass = FALSE; + failure_mssg = "H5Dwrite() failed.\n"; + } + } + + /* create a chunked dataset */ + chunk[0] = COUNT / 8; + + if (pass) { + if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) { + pass = FALSE; + failure_mssg = "H5Pcreate() failed.\n"; + } + } + + if (pass) { + if ((H5Pset_chunk(dcpl_id, 1, chunk)) < 0) { + pass = FALSE; + failure_mssg = "H5Pset_chunk() failed.\n"; + } + } + + if (pass) { + + if ((dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT, + dcpl_id, H5P_DEFAULT)) < 0) { + pass = FALSE; + failure_mssg = "H5Dcreate2() failed.\n"; + } + } + + if (pass) { + if ((H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace, filespace, dxpl_id, data_slice)) < 0) { + pass = FALSE; + failure_mssg = "H5Dwrite() failed.\n"; + } + } + if (pass || (dcpl_id != -1)) { + if 
(H5Pclose(dcpl_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pclose(dcpl_id) failed.\n";
+        }
+    }
+
+    if (pass || (dset_id_ch != -1)) {
+        if (H5Dclose(dset_id_ch) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+        }
+    }
+
+    /* close file, etc. */
+    if (pass || (dset_id != -1)) {
+        if (H5Dclose(dset_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dclose(dset_id) failed.\n";
+        }
+    }
+
+    if (pass || (memspace != -1)) {
+        if (H5Sclose(memspace) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sclose(memspace) failed.\n";
+        }
+    }
+
+    if (pass || (filespace != -1)) {
+        if (H5Sclose(filespace) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sclose(filespace) failed.\n";
+        }
+    }
+
+    if (pass || (file_id != -1)) {
+        if (H5Fclose(file_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Fclose(file_id) failed.\n";
+        }
+    }
+
+    if (pass || (dxpl_id != -1)) {
+        if (H5Pclose(dxpl_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+        }
+    }
+
+    if (pass || (fapl_id != -1)) {
+        if (H5Pclose(fapl_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pclose(fapl_id) failed.\n";
+        }
+    }
+
+    if (pass || (fctmpl != -1)) {
+        if (H5Pclose(fctmpl) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pclose(fctmpl) failed.\n";
+        }
+    }
+
+    /* Add a userblock to the head of the datafile.
+     * We will use this for a functional test of the
+     * file open optimization. This superblock
+     * relocation is done by the rank 0 process associated
+     * with the communicator being used. For test 1, we
+     * utilize MPI_COMM_WORLD, so group_rank 0 is the
+     * same as mpi_rank 0. For test 2, which utilizes
+     * two groups resulting from an MPI_Comm_split, we
+     * will have parallel groups and hence two
+     * group_rank(0) processes. Each parallel group
+     * will create a unique file with different text
+     * headers and different data.
+     */
+    if (group_rank == 0) {
+        const char *text_to_write;
+        size_t bytes_to_write;
+
+        if (group_id == 0)
+            text_to_write = random_hdf5_text;
+        else
+            text_to_write = hitchhiker_quote;
+
+        bytes_to_write = HDstrlen(text_to_write);
+
+        if (pass) {
+            if ((header = HDopen(data_filename, O_WRONLY)) < 0) {
+                pass = FALSE;
+                failure_mssg = "HDopen(data_filename, O_WRONLY) failed.\n";
+            }
+        }
+
+        if (pass) {
+            HDlseek(header, 0, SEEK_SET);
+            if (HDwrite(header, text_to_write, bytes_to_write) < 0) {
+                pass = FALSE;
+                failure_mssg = "Unable to write user text into file.\n";
+            }
+        }
+
+        if (pass || (header > 0)) {
+            if (HDclose(header) < 0) {
+                pass = FALSE;
+                failure_mssg = "HDclose() failed.\n";
+            }
+        }
+    }
+
+    /* Collect results from other processes.
+     * Only overwrite the failure message if no previous error
+     * has been detected.
+     */
+    local_failure = (pass ? 0 : 1);
+
+    /* This is a global all-reduce (NOT group specific) */
+    if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
+        if (pass) {
+            pass = FALSE;
+            failure_mssg = "MPI_Allreduce() failed.\n";
+        }
+    }
+    else if ((pass) && (global_failures > 0)) {
+        pass = FALSE;
+        failure_mssg = "One or more processes report failure.\n";
+    }
+
+    /* report results */
+    if (mpi_rank == 0) {
+        if (pass) {
+            HDfprintf(stdout, "Done.\n");
+        }
+        else {
+            HDfprintf(stdout, "FAILED.\n");
+            HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+        }
+    }
+
+    /* free data_slice if it has been allocated */
+    if (data_slice != NULL) {
+        HDfree(data_slice);
+        data_slice = NULL;
+    }
+
+    return (!pass);
+
+} /* generate_test_file() */
+
+/*-------------------------------------------------------------------------
+ * Function:    test_parallel_read
+ *
+ * Purpose:     This actually tests the superblock optimization
+ *              and covers the three primary cases we're interested in.
+ *              1). That HDF5 files can be opened in parallel by
+ *                  the rank 0 process and that the superblock
+ *                  offset is correctly broadcast to the other
+ *                  parallel file readers.
+ *              2). That a parallel application can correctly
+ *                  handle reading multiple files by using
+ *                  subgroups of MPI_COMM_WORLD and that each
+ *                  subgroup operates as described in (1) to
+ *                  collectively read the data.
+ *              3). Testing proc0-read-and-MPI_Bcast using
+ *                  sub-communicators, and reading into
+ *                  a memory space that is different from the
+ *                  file space, and chunked datasets.
+ *
+ *              The global MPI rank is used when reading and
+ *              writing the process-specific data in the
+ *              dataset. We do this rather simplistically, i.e.
+ *               rank 0: writes/reads 0-999
+ *               rank 1: writes/reads 1000-1999
+ *               rank 2: writes/reads 2000-2999
+ *               ...
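+ *
+ *              (Sketch of the general pattern, using names from
+ *              this file: group rank r covers the element range
+ *              [r * COUNT, (r + 1) * COUNT), selected below via
+ *                  offset = (hsize_t)group_rank * count;
+ *                  H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ *                                      &offset, NULL, &count, NULL); )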
+ *
+ * Return:      Success: 0
+ *
+ *              Failure: 1
+ *
+ * Programmer:  Richard Warren
+ *              10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
+{
+    const char *failure_mssg;
+    const char *fcn_name = "test_parallel_read()";
+    const char *group_filename = NULL;
+    char reloc_data_filename[FILENAME_BUF_SIZE];
+    int local_failure = 0;
+    int global_failures = 0;
+    int group_size;
+    int group_rank;
+    hid_t fapl_id = -1;
+    hid_t file_id = -1;
+    hid_t dset_id = -1;
+    hid_t dset_id_ch = -1;
+    hid_t dxpl_id = H5P_DEFAULT;
+    hid_t memspace = -1;
+    hid_t filespace = -1;
+    hid_t filetype = -1;
+    size_t filetype_size;
+    hssize_t dset_size;
+    hsize_t i;
+    hsize_t offset;
+    hsize_t count = COUNT;
+    hsize_t dims[1] = {0};
+    float nextValue;
+    float *data_slice = NULL;
+
+    pass = TRUE;
+
+    HDassert(comm != MPI_COMM_NULL);
+
+    if ((MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
+        pass = FALSE;
+        failure_mssg = "test_parallel_read: MPI_Comm_rank failed.\n";
+    }
+
+    if ((MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
+        pass = FALSE;
+        failure_mssg = "test_parallel_read: MPI_Comm_size failed.\n";
+    }
+
+    if (mpi_rank == 0) {
+        if (comm == MPI_COMM_WORLD) {
+            TESTING("parallel file open test 1");
+        }
+        else {
+            TESTING("parallel file open test 2");
+        }
+    }
+
+    /* allocate space for the data_slice array */
+    if (pass) {
+        if ((data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL) {
+            pass = FALSE;
+            failure_mssg = "malloc of data_slice failed.\n";
+        }
+    }
+
+    /* Select the file name to read.
+     * Please see the comments in the 'generate_test_file' function
+     * for more details...
+     */
+    if (pass) {
+
+        if (comm == MPI_COMM_WORLD) /* test 1 */
+            group_filename = FILENAMES[0];
+        else if (group_id == 0) /* test 2 group 0 */
+            group_filename = FILENAMES[1];
+        else /* test 2 group 1 */
+            group_filename = FILENAMES[2];
+
+        HDassert(group_filename);
+        if (h5_fixname(group_filename, H5P_DEFAULT, reloc_data_filename, sizeof(reloc_data_filename)) ==
+            NULL) {
+
+            pass = FALSE;
+            failure_mssg = "h5_fixname(1) failed.\n";
+        }
+    }
+
+    /* setup FAPL */
+    if (pass) {
+        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+        }
+    }
+
+    if (pass) {
+        if ((H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pset_fapl_mpio() failed\n";
+        }
+    }
+
+    /* open the file -- should have user block, exercising the optimization */
+    if (pass) {
+        if ((file_id = H5Fopen(reloc_data_filename, H5F_ACC_RDONLY, fapl_id)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Fopen() failed\n";
+        }
+    }
+
+    /* open the data set */
+    if (pass) {
+        if ((dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dopen2() failed\n";
+        }
+    }
+
+    /* open the chunked data set */
+    if (pass) {
+        if ((dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dopen2() failed\n";
+        }
+    }
+
+    /* setup memspace */
+    if (pass) {
+        dims[0] = count;
+        if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+        }
+    }
+
+    /* setup filespace */
+    if (pass) {
+        if ((filespace = H5Dget_space(dset_id)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dget_space(dataset) failed\n";
+        }
+    }
+
+    if (pass) {
+        offset = (hsize_t)group_rank * count;
+        if ((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset, NULL, &count, NULL)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sselect_hyperslab() failed\n";
+        }
+    }
+
+    /* read this process's section of the data */
+    if (pass) {
+        if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, data_slice)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dread() failed\n";
+        }
+    }
+
+    /* verify the data */
+    if (pass) {
+        nextValue = (float)((hsize_t)mpi_rank * count);
+        i = 0;
+        while ((pass) && (i < count)) {
+            /* what we really want is data_slice[i] != nextValue --
+             * the following is a circumlocution to shut up the
+             * compiler.
+             */
+            if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+                pass = FALSE;
+                failure_mssg = "Unexpected dset contents.\n";
+            }
+            nextValue += 1;
+            i++;
+        }
+    }
+
+    if (pass || (memspace != -1)) {
+        if (H5Sclose(memspace) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sclose(memspace) failed.\n";
+        }
+    }
+
+    if (pass || (filespace != -1)) {
+        if (H5Sclose(filespace) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sclose(filespace) failed.\n";
+        }
+    }
+
+    /* free data_slice if it has been allocated */
+    if (data_slice != NULL) {
+        HDfree(data_slice);
+        data_slice = NULL;
+    }
+
+    /*
+     * Test reading proc0-read-and-bcast with sub-communicators
+     */
+
+    /* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
+
+    if (group_size <= LIMIT_NPROC) {
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        hbool_t prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        if ((filespace = H5Dget_space(dset_id)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dget_space failed.\n";
+        }
+
+        if ((dset_size = H5Sget_simple_extent_npoints(filespace)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
+        }
+
+        if ((filetype = H5Dget_type(dset_id)) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dget_type failed.\n";
+        }
+
+        if ((filetype_size = H5Tget_size(filetype)) == 0) {
+            pass = FALSE;
+            failure_mssg = "H5Tget_size failed.\n";
+        }
+
+        if (H5Tclose(filetype) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Tclose failed.\n";
+        }
+
+        if ((data_slice = (float *)HDmalloc((size_t)dset_size * filetype_size)) == NULL) {
+            pass = FALSE;
+            failure_mssg = "malloc of data_slice failed.\n";
+        }
+
+        if (pass) {
+            if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+            }
+        }
+
+        if (pass) {
+            if ((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if (H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE,
+                           &prop_value, NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pinsert2() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* read H5S_ALL section */
+        if (pass) {
+            if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = FALSE;
+            if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if (prop_value != TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data */
+        if (pass) {
+
+            if (comm == MPI_COMM_WORLD) /* test 1 */
+                nextValue = 0;
+            else if (group_id == 0) /* test 2 group 0 */
+                nextValue = 0;
+            else /* test 2 group 1 */
+                nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+            i = 0;
+            while ((pass) && (i < (hsize_t)dset_size)) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * compiler.
+                 */
+                if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        /* read H5S_ALL section for the chunked dataset */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pset() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        for (i = 0; i < (hsize_t)dset_size; i++) {
+            data_slice[i] = 0;
+        }
+        if (pass) {
+            if ((H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = FALSE;
+            if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if (prop_value == TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data */
+        if (pass) {
+
+            if (comm == MPI_COMM_WORLD) /* test 1 */
+                nextValue = 0;
+            else if (group_id == 0) /* test 2 group 0 */
+                nextValue = 0;
+            else /* test 2 group 1 */
+                nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+            i = 0;
+            while ((pass) && (i < (hsize_t)dset_size)) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * compiler.
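+                 * (Presumably this avoids float-equality warnings,
+                 * e.g. GCC's -Wfloat-equal, while still flagging
+                 * any mismatch.)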
+                 */
+                if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected chunked dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        if (pass || (filespace != -1)) {
+            if (H5Sclose(filespace) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Sclose(filespace) failed.\n";
+            }
+        }
+
+        /* free data_slice if it has been allocated */
+        if (data_slice != NULL) {
+            HDfree(data_slice);
+            data_slice = NULL;
+        }
+
+        /*
+         * Read an H5S_ALL filespace into a hyperslab-defined memory space
+         */
+
+        if ((data_slice = (float *)HDmalloc((size_t)(dset_size * 2) * filetype_size)) == NULL) {
+            pass = FALSE;
+            failure_mssg = "malloc of data_slice failed.\n";
+        }
+
+        /* setup memspace */
+        if (pass) {
+            dims[0] = (hsize_t)dset_size * 2;
+            if ((memspace = H5Screate_simple(1, dims, NULL)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+            }
+        }
+        if (pass) {
+            offset = (hsize_t)dset_size;
+            if ((H5Sselect_hyperslab(memspace, H5S_SELECT_SET, &offset, NULL, &offset, NULL)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Sselect_hyperslab() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+            if (H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pset() failed\n";
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* read this process's section of the data */
+        if (pass) {
+            if ((H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace, H5S_ALL, dxpl_id, data_slice)) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Dread() failed\n";
+            }
+        }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+        if (pass) {
+            prop_value = FALSE;
+            if (H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pget() failed\n";
+            }
+            if (pass) {
+                if (prop_value != TRUE) {
+                    pass = FALSE;
+                    failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+                }
+            }
+        }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+        /* verify the data (the read landed in the upper half of the
+         * doubled buffer, so scan [dset_size, 2 * dset_size))
+         */
+        if (pass) {
+
+            if (comm == MPI_COMM_WORLD) /* test 1 */
+                nextValue = 0;
+            else if (group_id == 0) /* test 2 group 0 */
+                nextValue = 0;
+            else /* test 2 group 1 */
+                nextValue = (float)((hsize_t)(mpi_size / 2) * count);
+
+            i = (hsize_t)dset_size;
+            while ((pass) && (i < (hsize_t)dset_size * 2)) {
+                /* what we really want is data_slice[i] != nextValue --
+                 * the following is a circumlocution to shut up the
+                 * compiler.
+                 */
+                if ((data_slice[i] > nextValue) || (data_slice[i] < nextValue)) {
+                    pass = FALSE;
+                    failure_mssg = "Unexpected dset contents.\n";
+                }
+                nextValue += 1;
+                i++;
+            }
+        }
+
+        if (pass || (memspace != -1)) {
+            if (H5Sclose(memspace) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Sclose(memspace) failed.\n";
+            }
+        }
+
+        /* free data_slice if it has been allocated */
+        if (data_slice != NULL) {
+            HDfree(data_slice);
+            data_slice = NULL;
+        }
+
+        if (pass || (dxpl_id != -1)) {
+            if (H5Pclose(dxpl_id) < 0) {
+                pass = FALSE;
+                failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+            }
+        }
+    }
+
+    /* close file, etc. */
+    if (pass || (dset_id != -1)) {
+        if (H5Dclose(dset_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dclose(dset_id) failed.\n";
+        }
+    }
+
+    if (pass || (dset_id_ch != -1)) {
+        if (H5Dclose(dset_id_ch) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+        }
+    }
+
+    if (pass || (file_id != -1)) {
+        if (H5Fclose(file_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Fclose(file_id) failed.\n";
+        }
+    }
+
+    if (pass || (fapl_id != -1)) {
+        if (H5Pclose(fapl_id) < 0) {
+            pass = FALSE;
+            failure_mssg = "H5Pclose(fapl_id) failed.\n";
+        }
+    }
+
+    /* Collect results from other processes.
+     * Only overwrite the failure message if no previous error
+     * has been detected.
+     */
+    local_failure = (pass ? 0 : 1);
+
+    if (MPI_Allreduce(&local_failure, &global_failures, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS) {
+        if (pass) {
+            pass = FALSE;
+            failure_mssg = "MPI_Allreduce() failed.\n";
+        }
+    }
+    else if ((pass) && (global_failures > 0)) {
+        pass = FALSE;
+        failure_mssg = "One or more processes report failure.\n";
+    }
+
+    /* report results and finish cleanup */
+    if (group_rank == 0) {
+        if (pass) {
+            PASSED();
+        }
+        else {
+            H5_FAILED();
+            HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+        }
+        HDremove(reloc_data_filename);
+    }
+
+    return (!pass);
+
+} /* test_parallel_read() */
+
+/*-------------------------------------------------------------------------
+ * Function:    main
+ *
+ * Purpose:     To implement a parallel test which validates whether the
+ *              new superblock lookup functionality is working correctly.
+ *
+ *              The test consists of creating two separate HDF5 files
+ *              in which random text is inserted at the start of each
+ *              file using the 'h5jam' application. This forces the
+ *              HDF5 file superblock to a non-zero offset.
+ *              Having created the two independent files, we create two
+ *              non-overlapping MPI groups, each of which is then tasked
+ *              with the opening and validation of the data contained
+ *              therein.
+ *
+ * Return:      Success: 0
+ *              Failure: 1
+ *
+ * Programmer:  Richard Warren
+ *              10/1/17
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+    int nerrs = 0;
+    int which_group = 0;
+    int mpi_rank;
+    int mpi_size;
+    int split_size;
+    MPI_Comm group_comm = MPI_COMM_NULL;
+
+    /* I don't believe that argv[0] can ever be NULL.
+     * It should thus be safe to dup and save as a check
+     * for CMake testing. Note that in our CMake builds,
+     * all executables are located in the same directory.
+     * We assume (but we'll check) that the h5jam utility
+     * is in the same directory as this executable. If that
+     * isn't true, then we can use a relative path that
+     * should be valid for the autotools environment.
+ */ + test_argv0 = HDstrdup(argv[0]); + + if ((MPI_Init(&argc, &argv)) != MPI_SUCCESS) { + HDfprintf(stderr, "FATAL: Unable to initialize MPI\n"); + HDexit(EXIT_FAILURE); + } + + if ((MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) { + HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n"); + HDexit(EXIT_FAILURE); + } + + if ((MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) { + HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n"); + HDexit(EXIT_FAILURE); + } + + H5open(); + + if (mpi_rank == 0) { + HDfprintf(stdout, "========================================\n"); + HDfprintf(stdout, "Collective file open optimization tests\n"); + HDfprintf(stdout, " mpi_size = %d\n", mpi_size); + HDfprintf(stdout, "========================================\n"); + } + + if (mpi_size < 3) { + + if (mpi_rank == 0) { + + HDprintf(" Need at least 3 processes. Exiting.\n"); + } + goto finish; + } + + /* ------ Create two (2) MPI groups ------ + * + * We split MPI_COMM_WORLD into 2 more or less equal sized + * groups. The resulting communicators will be used to generate + * two HDF files which in turn will be opened in parallel and the + * contents verified in the second read test below. + */ + split_size = mpi_size / 2; + which_group = (mpi_rank < split_size ? 0 : 1); + + if ((MPI_Comm_split(MPI_COMM_WORLD, which_group, 0, &group_comm)) != MPI_SUCCESS) { + + HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n"); + HDexit(EXIT_FAILURE); + } + + /* ------ Generate all files ------ */ + + /* We generate the file used for test 1 */ + nerrs += generate_test_file(MPI_COMM_WORLD, mpi_rank, which_group); + + if (nerrs > 0) { + if (mpi_rank == 0) { + HDprintf(" Test(1) file construction failed -- skipping tests.\n"); + } + goto finish; + } + + /* We generate the file used for test 2 */ + nerrs += generate_test_file(group_comm, mpi_rank, which_group); + + if (nerrs > 0) { + if (mpi_rank == 0) { + HDprintf(" Test(2) file construction failed -- skipping tests.\n"); + } + goto finish; + } + + /* Now read the generated test file (still using MPI_COMM_WORLD) */ + nerrs += test_parallel_read(MPI_COMM_WORLD, mpi_rank, mpi_size, which_group); + + if (nerrs > 0) { + if (mpi_rank == 0) { + HDprintf(" Parallel read test(1) failed -- skipping tests.\n"); + } + goto finish; + } + + /* Update the user on our progress so far. */ + if (mpi_rank == 0) { + HDprintf(" Test 1 of 2 succeeded\n"); + HDprintf(" -- Starting multi-group parallel read test.\n"); + } + + /* run the 2nd set of tests */ + nerrs += test_parallel_read(group_comm, mpi_rank, mpi_size, which_group); + + if (nerrs > 0) { + if (mpi_rank == 0) { + HDprintf(" Multi-group read test(2) failed\n"); + } + goto finish; + } + + if (mpi_rank == 0) { + HDprintf(" Test 2 of 2 succeeded\n"); + } + +finish: + + if ((group_comm != MPI_COMM_NULL) && (MPI_Comm_free(&group_comm)) != MPI_SUCCESS) { + HDfprintf(stderr, "MPI_Comm_free failed!\n"); + } + + /* make sure all processes are finished before final report, cleanup + * and exit. 
+ */ + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_rank == 0) { /* only process 0 reports */ + const char *header = "Collective file open optimization tests"; + + HDfprintf(stdout, "===================================\n"); + if (nerrs > 0) { + HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs); + } + else { + HDfprintf(stdout, "%s finished with no failures\n", header); + } + HDfprintf(stdout, "===================================\n"); + } + + /* close HDF5 library */ + if (H5close() != SUCCEED) { + HDfprintf(stdout, "H5close() failed. (Ignoring)\n"); + } + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrs) because exit code is limited to 1byte */ + return ((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS); + +} /* main() */ diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c index fab4a7c..bfa72b6 100644 --- a/testpar/t_prestart.c +++ b/testpar/t_prestart.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. 
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 /*
@@ -23,37 +20,35 @@
 
 #include "testphdf5.h"
 
-int nerrors = 0;                /* errors count */
+int nerrors = 0; /* errors count */
 
-const char *FILENAME[] = {
-    "shutdown",
-    NULL
-};
+const char *FILENAME[] = {"shutdown", NULL};
 
 int
-main (int argc, char **argv)
+main(int argc, char **argv)
 {
-    hid_t file_id, dset_id, grp_id;
-    hid_t fapl, sid, mem_dataspace;
-    herr_t ret;
-    char filename[1024];
-    int mpi_size, mpi_rank, ndims, i, j;
-    MPI_Comm comm  = MPI_COMM_WORLD;
-    MPI_Info info  = MPI_INFO_NULL;
-    hsize_t dims[RANK];
-    hsize_t start[RANK];
-    hsize_t count[RANK];
-    hsize_t stride[RANK];
-    hsize_t block[RANK];
-    DATATYPE *data_array = NULL, *dataptr;  /* data buffer */
+    hid_t     file_id, dset_id, grp_id;
+    hid_t     fapl, sid, mem_dataspace;
+    herr_t    ret;
+    char      filename[1024];
+    int       mpi_size, mpi_rank, ndims;
+    MPI_Comm  comm = MPI_COMM_WORLD;
+    MPI_Info  info = MPI_INFO_NULL;
+    hsize_t   dims[RANK];
+    hsize_t   start[RANK];
+    hsize_t   count[RANK];
+    hsize_t   stride[RANK];
+    hsize_t   block[RANK];
+    hsize_t   i, j;
+    DATATYPE *data_array = NULL, *dataptr; /* data buffer */
 
     MPI_Init(&argc, &argv);
     MPI_Comm_size(comm, &mpi_size);
-    MPI_Comm_rank(comm, &mpi_rank);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    if (MAINPROCESS)
+        TESTING("proper shutdown of HDF5 library");
 
-    if(MAINPROCESS)
-        TESTING("proper shutdown of HDF5 library");
-
     /* Set up file access property list with parallel I/O access */
     fapl = H5Pcreate(H5P_FILE_ACCESS);
     VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -75,64 +70,62 @@ main (int argc, char **argv)
     ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
     VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
-    VRFY(dims[0] == ROW_FACTOR*mpi_size, "Wrong dataset dimensions");
-    VRFY(dims[1] == COL_FACTOR*mpi_size, "Wrong dataset dimensions");
+    VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+    VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
 
     /* allocate memory for data buffer */
-    data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
+    data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
     VRFY((data_array != NULL), "data_array HDmalloc succeeded");
 
     /* Each process takes a slab of rows. */
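     /* (Sketch, using the variables set just below: with
      * block[0] = dims[0] / mpi_size, rank r reads the row range
      * [r * block[0], (r + 1) * block[0]) -- one contiguous slab
      * of rows per rank.)
      */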
-    block[0] = dims[0]/mpi_size;
-    block[1] = dims[1];
+    block[0] = dims[0] / (hsize_t)mpi_size;
+    block[1] = dims[1];
     stride[0] = block[0];
     stride[1] = block[1];
-    count[0] = 1;
-    count[1] = 1;
-    start[0] = mpi_rank*block[0];
-    start[1] = 0;
+    count[0] = 1;
+    count[1] = 1;
+    start[0] = (hsize_t)mpi_rank * block[0];
+    start[1] = 0;
 
     ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
     VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
 
     /* create a memory dataspace independently */
-    mem_dataspace = H5Screate_simple (RANK, block, NULL);
+    mem_dataspace = H5Screate_simple(RANK, block, NULL);
     VRFY((mem_dataspace >= 0), "");
 
     /* read data independently */
-    ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid,
-            H5P_DEFAULT, data_array);
+    ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
     VRFY((ret >= 0), "H5Dread succeeded");
 
     dataptr = data_array;
 
-    for (i=0; i < block[0]; i++){
-        for (j=0; j < block[1]; j++){
-            if(*dataptr != mpi_rank+1) {
-                printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
-                       (unsigned long)i, (unsigned long)j,
-                       (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
-                       mpi_rank+1, *(dataptr));
-                nerrors ++;
+    for (i = 0; i < block[0]; i++) {
+        for (j = 0; j < block[1]; j++) {
+            if (*dataptr != mpi_rank + 1) {
+                HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+                         (unsigned long)i, (unsigned long)j, (unsigned long)((hsize_t)i + start[0]),
+                         (unsigned long)((hsize_t)j + start[1]), mpi_rank + 1, *(dataptr));
+                nerrors++;
             }
             dataptr++;
-        }
+        }
     }
 
     MPI_Finalize();
 
     HDremove(filename);
 
     /* release data buffers */
-    if(data_array)
+    if (data_array)
         HDfree(data_array);
 
     nerrors += GetTestNumErrs();
 
-    if(MAINPROCESS) {
-        if(0 == nerrors)
-            PASSED()
+    if (MAINPROCESS) {
+        if (0 == nerrors)
+            PASSED();
         else
-            H5_FAILED()
+            H5_FAILED();
     }
 
-    return (nerrors!=0);
+    return (nerrors != 0);
 }
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 2cc0f5e..6f7e28b 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -1,16 +1,13 @@
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  * Copyright by The HDF Group.                                               *
- * Copyright by the Board of Trustees of the University of Illinois.         *
  * All rights reserved.                                                      *
  *                                                                           *
  * This file is part of HDF5.  The full HDF5 copyright notice, including     *
  * terms governing use, modification, and redistribution, is contained in    *
- * the files COPYING and Copyright.html.  COPYING can be found at the root   *
- * of the source code distribution tree; Copyright.html can be found at the  *
- * root level of an installed copy of the electronic HDF5 document set and   *
- * is linked from the top-level documents page.  It can also be found at     *
- * http://hdfgroup.org/HDF5/doc/Copyright.html.  If you do not have          *
- * access to either file, you may request a copy from help@hdfgroup.org.     *
+ * the COPYING file, which can be found at the root of the source code       *
+ * distribution tree, or in https://www.hdfgroup.org/licenses.               *
+ * If you do not have access to either file, you may request a copy from    *
+ * help@hdfgroup.org.
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -25,22 +22,22 @@ static int test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) { MPI_Request req[2]; - MPI_Status status; - hid_t pl; /* Decoded property list */ - size_t buf_size = 0; - void *sbuf = NULL; - herr_t ret; /* Generic return value */ + MPI_Status status; + hid_t pl; /* Decoded property list */ + size_t buf_size = 0; + void *sbuf = NULL; + herr_t ret; /* Generic return value */ - if(mpi_rank == 0) { + if (mpi_rank == 0) { int send_size = 0; /* first call to encode returns only the size of the buffer needed */ - ret = H5Pencode(orig_pl, NULL, &buf_size); + ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT); VRFY((ret >= 0), "H5Pencode succeeded"); sbuf = (uint8_t *)HDmalloc(buf_size); - ret = H5Pencode(orig_pl, sbuf, &buf_size); + ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT); VRFY((ret >= 0), "H5Pencode succeeded"); /* this is a temp fix to send this size_t */ @@ -50,13 +47,13 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]); } /* end if */ - if(mpi_rank == recv_proc) { - int recv_size; + if (mpi_rank == recv_proc) { + int recv_size; void *rbuf; MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status); - buf_size = recv_size; - rbuf = (uint8_t *)HDmalloc(buf_size); + buf_size = (size_t)recv_size; + rbuf = (uint8_t *)HDmalloc(buf_size); MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status); pl = H5Pdecode(rbuf); @@ -67,89 +64,96 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc) ret = H5Pclose(pl); VRFY((ret >= 0), "H5Pclose succeeded"); - if(NULL != rbuf) + if (NULL != rbuf) HDfree(rbuf); } /* end if */ - if(0 == mpi_rank) + if (0 == mpi_rank) { + /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status + * array. See the discussion here: + * + * https://github.com/pmodels/mpich/issues/5687 + */ + H5_GCC_DIAG_OFF("stringop-overflow") MPI_Waitall(2, req, MPI_STATUSES_IGNORE); + H5_GCC_DIAG_ON("stringop-overflow") + } - if(NULL != sbuf) + if (NULL != sbuf) HDfree(sbuf); MPI_Barrier(MPI_COMM_WORLD); - return(0); + return 0; } void test_plist_ed(void) { - hid_t dcpl; /* dataset create prop. list */ - hid_t dapl; /* dataset access prop. list */ - hid_t dxpl; /* dataset transfer prop. list */ - hid_t gcpl; /* group create prop. list */ - hid_t lcpl; /* link create prop. list */ - hid_t lapl; /* link access prop. list */ - hid_t ocpypl; /* object copy prop. list */ - hid_t ocpl; /* object create prop. list */ - hid_t fapl; /* file access prop. list */ - hid_t fcpl; /* file create prop. list */ - hid_t strcpl; /* string create prop. list */ - hid_t acpl; /* attribute create prop. list */ + hid_t dcpl; /* dataset create prop. list */ + hid_t dapl; /* dataset access prop. list */ + hid_t dxpl; /* dataset transfer prop. list */ + hid_t gcpl; /* group create prop. list */ + hid_t lcpl; /* link create prop. list */ + hid_t lapl; /* link access prop. list */ + hid_t ocpypl; /* object copy prop. list */ + hid_t ocpl; /* object create prop. list */ + hid_t fapl; /* file access prop. list */ + hid_t fcpl; /* file create prop. list */ + hid_t strcpl; /* string create prop. list */ + hid_t acpl; /* attribute create prop. 
list */ int mpi_size, mpi_rank, recv_proc; - hsize_t chunk_size = 16384; /* chunk size */ - double fill = 2.7f; /* Fill value */ - size_t nslots = 521*2; - size_t nbytes = 1048576 * 10; - double w0 = 0.5f; - unsigned max_compact; - unsigned min_dense; - hsize_t max_size[1]; /*data space maximum size */ - const char* c_to_f = "x+32"; - H5AC_cache_config_t my_cache_config = { - H5AC__CURR_CACHE_CONFIG_VERSION, - TRUE, - FALSE, - FALSE, - "temp", - TRUE, - FALSE, - ( 2 * 2048 * 1024), - 0.3f, - (64 * 1024 * 1024), - (4 * 1024 * 1024), - 60000, - H5C_incr__threshold, - 0.8f, - 3.0f, - TRUE, - (8 * 1024 * 1024), - H5C_flash_incr__add_space, - 2.0f, - 0.25f, - H5C_decr__age_out_with_threshold, - 0.997f, - 0.8f, - TRUE, - (3 * 1024 * 1024), - 3, - FALSE, - 0.2f, - (256 * 2048), - H5AC__DEFAULT_METADATA_WRITE_STRATEGY}; - - herr_t ret; /* Generic return value */ - - if(VERBOSE_MED) - printf("Encode/Decode DCPLs\n"); + hsize_t chunk_size = 16384; /* chunk size */ + double fill = 2.7; /* Fill value */ + size_t nslots = 521 * 2; + size_t nbytes = 1048576 * 10; + double w0 = 0.5; + unsigned max_compact; + unsigned min_dense; + hsize_t max_size[1]; /*data space maximum size */ + const char *c_to_f = "x+32"; + H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION, + TRUE, + FALSE, + FALSE, + "temp", + TRUE, + FALSE, + (2 * 2048 * 1024), + 0.3, + (64 * 1024 * 1024), + (4 * 1024 * 1024), + 60000, + H5C_incr__threshold, + 0.8, + 3.0, + TRUE, + (8 * 1024 * 1024), + H5C_flash_incr__add_space, + 2.0, + 0.25, + H5C_decr__age_out_with_threshold, + 0.997, + 0.8, + TRUE, + (3 * 1024 * 1024), + 3, + FALSE, + 0.2, + (256 * 2048), + H5AC__DEFAULT_METADATA_WRITE_STRATEGY}; + + herr_t ret; /* Generic return value */ + + if (VERBOSE_MED) + HDprintf("Encode/Decode DCPLs\n"); /* set up MPI parameters */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - if(mpi_size == 1) + if (mpi_size == 1) recv_proc = 0; else recv_proc = 1; @@ -164,21 +168,17 @@ test_plist_ed(void) VRFY((ret >= 0), "H5Pset_alloc_time succeeded"); ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill); - VRFY((ret>=0), "set fill-value succeeded"); + VRFY((ret >= 0), "set fill-value succeeded"); max_size[0] = 100; - ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, - (hsize_t)(max_size[0] * sizeof(int)/4)); - VRFY((ret>=0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, - (hsize_t)(max_size[0] * sizeof(int)/4)); - VRFY((ret>=0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, - (hsize_t)(max_size[0] * sizeof(int)/4)); - VRFY((ret>=0), "set external succeeded"); - ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, - (hsize_t)(max_size[0] * sizeof(int)/4)); - VRFY((ret>=0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); + ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4)); + VRFY((ret >= 0), "set external succeeded"); ret = test_encode_decode(dcpl, mpi_rank, recv_proc); VRFY((ret >= 0), "test_encode_decode succeeded"); @@ -186,7 +186,6 @@ test_plist_ed(void) ret = H5Pclose(dcpl); VRFY((ret >= 0), 
"H5Pclose succeeded"); - /******* ENCODE/DECODE DAPLS *****/ dapl = H5Pcreate(H5P_DATASET_ACCESS); VRFY((dapl >= 0), "H5Pcreate succeeded"); @@ -200,7 +199,6 @@ test_plist_ed(void) ret = H5Pclose(dapl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE OCPLS *****/ ocpl = H5Pcreate(H5P_OBJECT_CREATE); VRFY((ocpl >= 0), "H5Pcreate succeeded"); @@ -220,12 +218,11 @@ test_plist_ed(void) ret = H5Pclose(ocpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE DXPLS *****/ dxpl = H5Pcreate(H5P_DATASET_XFER); VRFY((dxpl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_btree_ratios(dxpl, 0.2f, 0.6f, 0.2f); + ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2); VRFY((ret >= 0), "H5Pset_btree_ratios succeeded"); ret = H5Pset_hyper_vector_size(dxpl, 5); @@ -258,7 +255,6 @@ test_plist_ed(void) ret = H5Pclose(dxpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE GCPLS *****/ gcpl = H5Pcreate(H5P_GROUP_CREATE); VRFY((gcpl >= 0), "H5Pcreate succeeded"); @@ -285,12 +281,11 @@ test_plist_ed(void) ret = H5Pclose(gcpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE LCPLS *****/ lcpl = H5Pcreate(H5P_LINK_CREATE); VRFY((lcpl >= 0), "H5Pcreate succeeded"); - ret= H5Pset_create_intermediate_group(lcpl, TRUE); + ret = H5Pset_create_intermediate_group(lcpl, TRUE); VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); ret = test_encode_decode(lcpl, mpi_rank, recv_proc); @@ -299,7 +294,6 @@ test_plist_ed(void) ret = H5Pclose(lcpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE LAPLS *****/ lapl = H5Pcreate(H5P_LINK_ACCESS); VRFY((lapl >= 0), "H5Pcreate succeeded"); @@ -332,7 +326,6 @@ test_plist_ed(void) ret = H5Pclose(lapl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE OCPYPLS *****/ ocpypl = H5Pcreate(H5P_OBJECT_COPY); VRFY((ocpypl >= 0), "H5Pcreate succeeded"); @@ -352,7 +345,6 @@ test_plist_ed(void) ret = H5Pclose(ocpypl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE FAPLS *****/ fapl = H5Pcreate(H5P_FILE_ACCESS); VRFY((fapl >= 0), "H5Pcreate succeeded"); @@ -369,7 +361,7 @@ test_plist_ed(void) ret = H5Pset_alignment(fapl, 2, 1024); VRFY((ret >= 0), "H5Pset_alignment succeeded"); - ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3f); + ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3); VRFY((ret >= 0), "H5Pset_cache succeeded"); ret = H5Pset_elink_file_cache_size(fapl, 10485760); @@ -399,7 +391,6 @@ test_plist_ed(void) ret = H5Pclose(fapl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE FCPLS *****/ fcpl = H5Pcreate(H5P_FILE_CREATE); VRFY((fcpl >= 0), "H5Pcreate succeeded"); @@ -416,7 +407,7 @@ test_plist_ed(void) ret = H5Pset_shared_mesg_nindexes(fcpl, 8); VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded"); - ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32); + ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32); VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded"); ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20); @@ -431,7 +422,6 @@ test_plist_ed(void) ret = H5Pclose(fcpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE STRCPLS *****/ strcpl = H5Pcreate(H5P_STRING_CREATE); VRFY((strcpl >= 0), "H5Pcreate succeeded"); @@ -445,7 +435,6 @@ test_plist_ed(void) ret = H5Pclose(strcpl); VRFY((ret >= 0), "H5Pclose succeeded"); - /******* ENCODE/DECODE ACPLS *****/ acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE); VRFY((acpl >= 0), "H5Pcreate succeeded"); @@ -460,3 +449,191 @@ test_plist_ed(void) VRFY((ret >= 0), 
"H5Pclose succeeded"); } +void +external_links(void) +{ + hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */ + hid_t lapl = H5I_INVALID_HID; /* link access prop. list */ + hid_t fapl = H5I_INVALID_HID; /* file access prop. list */ + hid_t gapl = H5I_INVALID_HID; /* group access prop. list */ + hid_t fid = H5I_INVALID_HID; /* file id */ + hid_t group = H5I_INVALID_HID; /* group id */ + int mpi_size, mpi_rank; + + MPI_Comm comm; + int doIO; + int i, mrc; + + herr_t ret; /* Generic return value */ + htri_t tri_status; /* tri return value */ + + const char *filename = "HDF5test.h5"; + const char *filename_ext = "HDF5test_ext.h5"; + const char *group_path = "/Base/Block/Step"; + const char *link_name = "link"; /* external link */ + char link_path[50]; + + if (VERBOSE_MED) + HDprintf("Check external links\n"); + + /* set up MPI parameters */ + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + /* Check MPI communicator access properties are passed to + linked external files */ + + if (mpi_rank == 0) { + + lcpl = H5Pcreate(H5P_LINK_CREATE); + VRFY((lcpl >= 0), "H5Pcreate succeeded"); + + ret = H5Pset_create_intermediate_group(lcpl, 1); + VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded"); + + /* Create file to serve as target for external link.*/ + fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); + VRFY((group >= 0), "H5Gcreate succeeded"); + + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + + fapl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((fapl >= 0), "H5Pcreate succeeded"); + + /* Create a new file using the file access property list. */ + fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); + VRFY((fid >= 0), "H5Fcreate succeeded"); + + ret = H5Pclose(fapl); + VRFY((ret >= 0), "H5Pclose succeeded"); + + group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT); + VRFY((group >= 0), "H5Gcreate succeeded"); + + /* Create external links to the target files. */ + ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT); + VRFY((ret >= 0), "H5Lcreate_external succeeded"); + + /* Close and release resources. */ + ret = H5Pclose(lcpl); + VRFY((ret >= 0), "H5Pclose succeeded"); + ret = H5Gclose(group); + VRFY((ret >= 0), "H5Gclose succeeded"); + ret = H5Fclose(fid); + VRFY((ret >= 0), "H5Fclose succeeded"); + } + + MPI_Barrier(MPI_COMM_WORLD); + + /* + * For the first case, use all the processes. For the second case + * use a sub-communicator to verify the correct communicator is + * being used for the externally linked files. + * There is no way to determine if MPI info is being used for the + * externally linked files. 
+     */
+
+    for (i = 0; i < 2; i++) {
+
+        comm = MPI_COMM_WORLD;
+
+        if (i == 0)
+            doIO = 1;
+        else {
+            doIO = mpi_rank % 2;
+            mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm);
+            VRFY((mrc == MPI_SUCCESS), "");
+        }
+
+        if (doIO) {
+            fapl = H5Pcreate(H5P_FILE_ACCESS);
+            VRFY((fapl >= 0), "H5Pcreate succeeded");
+            ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
+            VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
+
+            fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+            VRFY((fid >= 0), "H5Fopen succeeded");
+
+            /* test opening a group through an external link; the externally
+               linked file should inherit the source file's access properties */
+            HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
+            group = H5Gopen2(fid, link_path, H5P_DEFAULT);
+            VRFY((group >= 0), "H5Gopen succeeded");
+            ret = H5Gclose(group);
+            VRFY((ret >= 0), "H5Gclose succeeded");
+
+            /* test opening a group through an external link by setting the
+               group access property */
+            gapl = H5Pcreate(H5P_GROUP_ACCESS);
+            VRFY((gapl >= 0), "H5Pcreate succeeded");
+
+            ret = H5Pset_elink_fapl(gapl, fapl);
+            VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+            group = H5Gopen2(fid, link_path, gapl);
+            VRFY((group >= 0), "H5Gopen succeeded");
+
+            ret = H5Gclose(group);
+            VRFY((ret >= 0), "H5Gclose succeeded");
+
+            ret = H5Pclose(gapl);
+            VRFY((ret >= 0), "H5Pclose succeeded");
+
+            /* test link APIs */
+            lapl = H5Pcreate(H5P_LINK_ACCESS);
+            VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+            ret = H5Pset_elink_fapl(lapl, fapl);
+            VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+            tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
+            VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+            tri_status = H5Lexists(fid, link_path, lapl);
+            VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+            group = H5Oopen(fid, link_path, H5P_DEFAULT);
+            VRFY((group >= 0), "H5Oopen succeeded");
+
+            ret = H5Oclose(group);
+            VRFY((ret >= 0), "H5Oclose succeeded");
+
+            group = H5Oopen(fid, link_path, lapl);
+            VRFY((group >= 0), "H5Oopen succeeded");
+
+            ret = H5Oclose(group);
+            VRFY((ret >= 0), "H5Oclose succeeded");
+
+            ret = H5Pclose(lapl);
+            VRFY((ret >= 0), "H5Pclose succeeded");
+
+            /* close the remaining resources */
+
+            ret = H5Pclose(fapl);
+            VRFY((ret >= 0), "H5Pclose succeeded");
+
+            ret = H5Fclose(fid);
+            VRFY((ret >= 0), "H5Fclose succeeded");
+        }
+
+        if (comm != MPI_COMM_WORLD) {
+            mrc = MPI_Comm_free(&comm);
+            VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+        }
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    /* delete the test files */
+    if (mpi_rank == 0) {
+        MPI_File_delete(filename, MPI_INFO_NULL);
+        MPI_File_delete(filename_ext, MPI_INFO_NULL);
+    }
+}
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index be9734f..278fd09 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -1,16 +1,13 @@
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
  * Copyright by The HDF Group.                                               *
- * Copyright by the Board of Trustees of the University of Illinois.         *
  * All rights reserved.                                                      *
  *                                                                           *
  * This file is part of HDF5.  The full HDF5 copyright notice, including     *
  * terms governing use, modification, and redistribution, is contained in    *
- * the files COPYING and Copyright.html.  COPYING can be found at the root   *
- * of the source code distribution tree; Copyright.html can be found at the  *
- * root level of an installed copy of the electronic HDF5 document set and   *
- * is linked from the top-level documents page.  It can also be found at     *
- * http://hdfgroup.org/HDF5/doc/Copyright.html.  If you do not have          *
- * access to either file, you may request a copy from help@hdfgroup.org.     *
+ * the COPYING file, which can be found at the root of the source code       *
+ * distribution tree, or in https://www.hdfgroup.org/licenses.               *
+ * If you do not have access to either file, you may request a copy from    *
+ * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 /*
@@ -27,37 +24,34 @@
 
 #include "testphdf5.h"
 
-int nerrors = 0;                /* errors count */
+int nerrors = 0; /* errors count */
 
-const char *FILENAME[] = {
-    "shutdown",
-    NULL
-};
+const char *FILENAME[] = {"shutdown", NULL};
 
 int
-main (int argc, char **argv)
+main(int argc, char **argv)
 {
-    hid_t file_id, dset_id, grp_id;
-    hid_t fapl, sid, mem_dataspace;
-    hsize_t dims[RANK], i;
-    herr_t ret;
-    char filename[1024];
-    int mpi_size, mpi_rank;
-    MPI_Comm comm  = MPI_COMM_WORLD;
-    MPI_Info info  = MPI_INFO_NULL;
-    hsize_t start[RANK];
-    hsize_t count[RANK];
-    hsize_t stride[RANK];
-    hsize_t block[RANK];
-    DATATYPE *data_array = NULL;        /* data buffer */
+    hid_t     file_id, dset_id, grp_id;
+    hid_t     fapl, sid, mem_dataspace;
+    hsize_t   dims[RANK], i;
+    herr_t    ret;
+    char      filename[1024];
+    int       mpi_size, mpi_rank;
+    MPI_Comm  comm = MPI_COMM_WORLD;
+    MPI_Info  info = MPI_INFO_NULL;
+    hsize_t   start[RANK];
+    hsize_t   count[RANK];
+    hsize_t   stride[RANK];
+    hsize_t   block[RANK];
+    DATATYPE *data_array = NULL; /* data buffer */
 
     MPI_Init(&argc, &argv);
     MPI_Comm_size(comm, &mpi_size);
-    MPI_Comm_rank(comm, &mpi_rank);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    if (MAINPROCESS)
+        TESTING("proper shutdown of HDF5 library");
 
-    if(MAINPROCESS)
-        TESTING("proper shutdown of HDF5 library");
-
     /* Set up file access property list with parallel I/O access */
     fapl = H5Pcreate(H5P_FILE_ACCESS);
     VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -70,58 +64,57 @@ main (int argc, char **argv)
     grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     VRFY((grp_id >= 0), "H5Gcreate succeeded");
 
-    dims[0] = ROW_FACTOR*mpi_size;
-    dims[1] = COL_FACTOR*mpi_size;
-    sid = H5Screate_simple (RANK, dims, NULL);
+    dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
+    dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
+    sid = H5Screate_simple(RANK, dims, NULL);
     VRFY((sid >= 0), "H5Screate_simple succeeded");
 
     dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     VRFY((dset_id >= 0), "H5Dcreate succeeded");
 
     /* allocate memory for data buffer */
-    data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
+    data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");
 
     /* Each process takes a slab of rows. */
*/ - block[0] = dims[0]/mpi_size; - block[1] = dims[1]; + block[0] = dims[0] / (hsize_t)mpi_size; + block[1] = dims[1]; stride[0] = block[0]; stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = mpi_rank*block[0]; - start[1] = 0; + count[0] = 1; + count[1] = 1; + start[0] = (hsize_t)mpi_rank * block[0]; + start[1] = 0; /* put some trivial data in the data_array */ - for(i=0 ; i<dims[0]*dims[1]; i++) + for (i = 0; i < dims[0] * dims[1]; i++) data_array[i] = mpi_rank + 1; ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block); VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (RANK, block, NULL); + mem_dataspace = H5Screate_simple(RANK, block, NULL); VRFY((mem_dataspace >= 0), ""); /* write data independently */ - ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, - H5P_DEFAULT, data_array); + ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array); VRFY((ret >= 0), "H5Dwrite succeeded"); /* release data buffers */ - if(data_array) + if (data_array) HDfree(data_array); MPI_Finalize(); nerrors += GetTestNumErrs(); - if(MAINPROCESS) { - if(0 == nerrors) - PASSED() + if (MAINPROCESS) { + if (0 == nerrors) + PASSED(); else - H5_FAILED() + H5_FAILED(); } - return (nerrors!=0); + return (nerrors != 0); } diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c index 9088470..cbae5e1 100644 --- a/testpar/t_shapesame.c +++ b/testpar/t_shapesame.c @@ -4,119 +4,107 @@ * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* - This program will test independant and collective reads and writes between - selections of different rank that non-the-less are deemed as having the + This program will test independent and collective reads and writes between + selections of different rank that non-the-less are deemed as having the same shape by H5Sselect_shape_same(). */ -#define H5S_PACKAGE /*suppress error about including H5Spkg */ +#define H5S_FRIEND /*suppress error about including H5Spkg */ /* Define this macro to indicate that the testing APIs should be available */ #define H5S_TESTING - -#include "hdf5.h" -#include "H5private.h" +#include "H5Spkg.h" /* Dataspaces */ #include "testphdf5.h" -#include "H5Spkg.h" /* Dataspaces */ - /* On Lustre (and perhaps other parallel file systems?), we have severe * slow downs if two or more processes attempt to access the same file system * block. 
To minimize this problem, we set alignment in the shape same tests - * to the default Lustre block size -- which greatly reduces contention in + * to the default Lustre block size -- which greatly reduces contention in * the chunked dataset case. */ -#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024)) - - -#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */ - -struct hs_dr_pio_test_vars_t -{ - int mpi_size; - int mpi_rank; - MPI_Comm mpi_comm; - MPI_Info mpi_info; - int test_num; - int edge_size; - int checker_edge_size; - int chunk_edge_size; - int small_rank; - int large_rank; - hid_t dset_type; - uint32_t * small_ds_buf_0; - uint32_t * small_ds_buf_1; - uint32_t * small_ds_buf_2; - uint32_t * small_ds_slice_buf; - uint32_t * large_ds_buf_0; - uint32_t * large_ds_buf_1; - uint32_t * large_ds_buf_2; - uint32_t * large_ds_slice_buf; - int small_ds_offset; - int large_ds_offset; - hid_t fid; /* HDF5 file ID */ - hid_t xfer_plist; - hid_t full_mem_small_ds_sid; - hid_t full_file_small_ds_sid; - hid_t mem_small_ds_sid; - hid_t file_small_ds_sid_0; - hid_t file_small_ds_sid_1; - hid_t small_ds_slice_sid; - hid_t full_mem_large_ds_sid; - hid_t full_file_large_ds_sid; - hid_t mem_large_ds_sid; - hid_t file_large_ds_sid_0; - hid_t file_large_ds_sid_1; - hid_t file_large_ds_process_slice_sid; - hid_t mem_large_ds_process_slice_sid; - hid_t large_ds_slice_sid; - hid_t small_dataset; /* Dataset ID */ - hid_t large_dataset; /* Dataset ID */ - size_t small_ds_size; - size_t small_ds_slice_size; - size_t large_ds_size; - size_t large_ds_slice_size; - hsize_t dims[PAR_SS_DR_MAX_RANK]; - hsize_t chunk_dims[PAR_SS_DR_MAX_RANK]; - hsize_t start[PAR_SS_DR_MAX_RANK]; - hsize_t stride[PAR_SS_DR_MAX_RANK]; - hsize_t count[PAR_SS_DR_MAX_RANK]; - hsize_t block[PAR_SS_DR_MAX_RANK]; - hsize_t * start_ptr; - hsize_t * stride_ptr; - hsize_t * count_ptr; - hsize_t * block_ptr; - int skips; - int max_skips; - int64_t total_tests; - int64_t tests_run; - int64_t tests_skipped; +#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024)) + +#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */ + +struct hs_dr_pio_test_vars_t { + int mpi_size; + int mpi_rank; + MPI_Comm mpi_comm; + MPI_Info mpi_info; + int test_num; + int edge_size; + int checker_edge_size; + int chunk_edge_size; + int small_rank; + int large_rank; + hid_t dset_type; + uint32_t *small_ds_buf_0; + uint32_t *small_ds_buf_1; + uint32_t *small_ds_buf_2; + uint32_t *small_ds_slice_buf; + uint32_t *large_ds_buf_0; + uint32_t *large_ds_buf_1; + uint32_t *large_ds_buf_2; + uint32_t *large_ds_slice_buf; + int small_ds_offset; + int large_ds_offset; + hid_t fid; /* HDF5 file ID */ + hid_t xfer_plist; + hid_t full_mem_small_ds_sid; + hid_t full_file_small_ds_sid; + hid_t mem_small_ds_sid; + hid_t file_small_ds_sid_0; + hid_t file_small_ds_sid_1; + hid_t small_ds_slice_sid; + hid_t full_mem_large_ds_sid; + hid_t full_file_large_ds_sid; + hid_t mem_large_ds_sid; + hid_t file_large_ds_sid_0; + hid_t file_large_ds_sid_1; + hid_t file_large_ds_process_slice_sid; + hid_t mem_large_ds_process_slice_sid; + hid_t large_ds_slice_sid; + hid_t small_dataset; /* Dataset ID */ + hid_t large_dataset; /* Dataset ID */ + size_t small_ds_size; + size_t small_ds_slice_size; + size_t large_ds_size; + size_t large_ds_slice_size; + hsize_t dims[PAR_SS_DR_MAX_RANK]; + hsize_t chunk_dims[PAR_SS_DR_MAX_RANK]; + hsize_t start[PAR_SS_DR_MAX_RANK]; + hsize_t stride[PAR_SS_DR_MAX_RANK]; + hsize_t count[PAR_SS_DR_MAX_RANK]; + hsize_t 
block[PAR_SS_DR_MAX_RANK]; + hsize_t *start_ptr; + hsize_t *stride_ptr; + hsize_t *count_ptr; + hsize_t *block_ptr; + int skips; + int max_skips; + int64_t total_tests; + int64_t tests_run; + int64_t tests_skipped; }; /*------------------------------------------------------------------------- - * Function: hs_dr_pio_test__setup() - * - * Purpose: Do setup for tests of I/O to/from hyperslab selections of - * different rank in the parallel case. + * Function: hs_dr_pio_test__setup() * - * Return: void + * Purpose: Do setup for tests of I/O to/from hyperslab selections of + * different rank in the parallel case. * - * Programmer: JRM -- 8/9/11 + * Return: void * - * Modifications: - * - * None. + * Programmer: JRM -- 8/9/11 * *------------------------------------------------------------------------- */ @@ -124,60 +112,53 @@ struct hs_dr_pio_test_vars_t #define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0 static void -hs_dr_pio_test__setup(const int test_num, - const int edge_size, - const int checker_edge_size, - const int chunk_edge_size, - const int small_rank, - const int large_rank, - const hbool_t use_collective_io, - const hid_t dset_type, - const int express_test, - struct hs_dr_pio_test_vars_t * tv_ptr) +hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size, + const int chunk_edge_size, const int small_rank, const int large_rank, + const hbool_t use_collective_io, const hid_t dset_type, const int express_test, + struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG +#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG const char *fcnName = "hs_dr_pio_test__setup()"; #endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ const char *filename; - hbool_t mis_match = FALSE; - int i; + hbool_t mis_match = FALSE; + int i; int mrc; - int mpi_rank; /* needed by the VRFY macro */ - uint32_t expected_value; - uint32_t * ptr_0; - uint32_t * ptr_1; - hid_t acc_tpl; /* File access templates */ + int mpi_rank; /* needed by the VRFY macro */ + uint32_t expected_value; + uint32_t *ptr_0; + uint32_t *ptr_1; + hid_t acc_tpl; /* File access templates */ hid_t small_ds_dcpl_id = H5P_DEFAULT; hid_t large_ds_dcpl_id = H5P_DEFAULT; - herr_t ret; /* Generic return value */ + herr_t ret; /* Generic return value */ - HDassert( edge_size >= 6 ); - HDassert( edge_size >= chunk_edge_size ); - HDassert( ( chunk_edge_size == 0 ) || ( chunk_edge_size >= 3 ) ); - HDassert( 1 < small_rank ); - HDassert( small_rank < large_rank ); - HDassert( large_rank <= PAR_SS_DR_MAX_RANK ); + HDassert(edge_size >= 6); + HDassert(edge_size >= chunk_edge_size); + HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3)); + HDassert(1 < small_rank); + HDassert(small_rank < large_rank); + HDassert(large_rank <= PAR_SS_DR_MAX_RANK); - tv_ptr->test_num = test_num; - tv_ptr->edge_size = edge_size; + tv_ptr->test_num = test_num; + tv_ptr->edge_size = edge_size; tv_ptr->checker_edge_size = checker_edge_size; - tv_ptr->chunk_edge_size = chunk_edge_size; - tv_ptr->small_rank = small_rank; - tv_ptr->large_rank = large_rank; - tv_ptr->dset_type = dset_type; + tv_ptr->chunk_edge_size = chunk_edge_size; + tv_ptr->small_rank = small_rank; + tv_ptr->large_rank = large_rank; + tv_ptr->dset_type = dset_type; MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size)); MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank)); /* the VRFY() macro needs the local variable mpi_rank -- set it up now */ mpi_rank = tv_ptr->mpi_rank; - HDassert( tv_ptr->mpi_size >= 1 ); + HDassert(tv_ptr->mpi_size >= 1); tv_ptr->mpi_comm = 
MPI_COMM_WORLD; tv_ptr->mpi_info = MPI_INFO_NULL; - for ( i = 0; i < tv_ptr->small_rank - 1; i++ ) - { + for (i = 0; i < tv_ptr->small_rank - 1; i++) { tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size); tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size); } @@ -186,10 +167,10 @@ hs_dr_pio_test__setup(const int test_num, /* used by checker board tests only */ tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank; - HDassert( 0 < tv_ptr->small_ds_offset ); - HDassert( tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK ); + HDassert(0 < tv_ptr->small_ds_offset); + HDassert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK); - for ( i = 0; i < tv_ptr->large_rank - 1; i++ ) { + for (i = 0; i < tv_ptr->large_rank - 1; i++) { tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size); tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size); @@ -199,9 +180,8 @@ hs_dr_pio_test__setup(const int test_num, /* used by checker board tests only */ tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank; - HDassert( 0 <= tv_ptr->large_ds_offset ); - HDassert( tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK ); - + HDassert(0 <= tv_ptr->large_ds_offset); + HDassert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK); /* set up the start, stride, count, and block pointers */ /* used by contiguous tests only */ @@ -210,7 +190,6 @@ hs_dr_pio_test__setup(const int test_num, tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]); - /* Allocate buffers */ tv_ptr->small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size); VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded"); @@ -221,8 +200,7 @@ hs_dr_pio_test__setup(const int test_num, tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size); VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded"); - tv_ptr->small_ds_slice_buf = - (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size); + tv_ptr->small_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size); VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded"); tv_ptr->large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size); @@ -234,14 +212,13 @@ hs_dr_pio_test__setup(const int test_num, tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size); VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded"); - tv_ptr->large_ds_slice_buf = - (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size); + tv_ptr->large_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size); VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded"); /* initialize the buffers */ ptr_0 = tv_ptr->small_ds_buf_0; - for(i = 0; i < (int)(tv_ptr->small_ds_size); i++) + for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) *ptr_0++ = (uint32_t)i; HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); HDmemset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); @@ -249,7 +226,7 @@ hs_dr_pio_test__setup(const int test_num, HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); ptr_0 = tv_ptr->large_ds_buf_0; - for(i = 0; i < (int)(tv_ptr->large_ds_size); i++) + for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) *ptr_0++ = (uint32_t)i; 
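The seed pattern above, where each buffer element holds its own linear index, is what makes the later element-by-element checks meaningful: a value read into the wrong place carries the wrong index. A self-contained sketch of the fill/verify pair, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Seed a buffer with its linear index... */
    static void
    fill_linear(uint32_t *buf, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            buf[i] = (uint32_t)i;
    }

    /* ...so a read-back can be checked against a running expected value,
     * exactly as the mis_match loops in these tests do. */
    static bool
    verify_linear(const uint32_t *buf, size_t n, uint32_t expected)
    {
        for (size_t i = 0; i < n; i++)
            if (buf[i] != expected + (uint32_t)i)
                return false;
        return true;
    }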
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); HDmemset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); @@ -257,23 +234,19 @@ hs_dr_pio_test__setup(const int test_num, HDmemset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size); filename = (const char *)GetTestParameters(); - HDassert( filename != NULL ); -#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG - if ( MAINPROCESS ) { + HDassert(filename != NULL); +#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num); HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size); - HDfprintf(stdout, - "%d: small/large rank = %d/%d, use_collective_io = %d.\n", - tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank, - (int)use_collective_io); - HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", - tv_ptr->mpi_rank, tv_ptr->edge_size, tv_ptr->chunk_edge_size); - HDfprintf(stdout, "%d: checker_edge_size = %d.\n", - tv_ptr->mpi_rank, tv_ptr->checker_edge_size); - HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", - tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size), - (int)(tv_ptr->large_ds_size)); + HDfprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank, + tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io); + HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size, + tv_ptr->chunk_edge_size); + HDfprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size); + HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank, + (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size)); HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename); } #endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */ @@ -288,7 +261,7 @@ hs_dr_pio_test__setup(const int test_num, * the same file system block. Do this only if express_test is greater * than zero. 
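To make the alignment remark concrete: the property goes on the file-access list before the file is created, so every object allocation lands on its own 4 MiB boundary. A minimal sketch of how such a FAPL might be assembled (the test builds its acc_tpl through a helper, so take the H5Pcreate/H5Pset_fapl_mpio pair here as an assumption):

    hid_t acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);

    if (express_test > 0)
        /* threshold 0 = align every allocation; 4 MiB matches the
         * SHAPE_SAME_TEST_ALIGNMENT constant defined earlier */
        H5Pset_alignment(acc_tpl, (hsize_t)0, (hsize_t)(4 * 1024 * 1024));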
*/ - if ( express_test > 0 ) { + if (express_test > 0) { ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT); VRFY((ret != FAIL), "H5Pset_alignment() succeeded"); @@ -304,118 +277,86 @@ hs_dr_pio_test__setup(const int test_num, ret = H5Pclose(acc_tpl); VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); - /* setup dims: */ tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1); - tv_ptr->dims[1] = tv_ptr->dims[2] = - tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size); - + tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size); /* Create small ds dataspaces */ - tv_ptr->full_mem_small_ds_sid = - H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_mem_small_ds_sid != 0), - "H5Screate_simple() full_mem_small_ds_sid succeeded"); + tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); - tv_ptr->full_file_small_ds_sid = - H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_file_small_ds_sid != 0), - "H5Screate_simple() full_file_small_ds_sid succeeded"); + tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded"); tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_small_ds_sid != 0), - "H5Screate_simple() mem_small_ds_sid succeeded"); + VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_small_ds_sid_0 != 0), - "H5Screate_simple() file_small_ds_sid_0 succeeded"); + VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded"); /* used by checker board tests only */ tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_small_ds_sid_1 != 0), - "H5Screate_simple() file_small_ds_sid_1 succeeded"); - - tv_ptr->small_ds_slice_sid = - H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL); - VRFY((tv_ptr->small_ds_slice_sid != 0), - "H5Screate_simple() small_ds_slice_sid succeeded"); + VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded"); + tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL); + VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded"); /* Create large ds dataspaces */ - tv_ptr->full_mem_large_ds_sid = - H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_mem_large_ds_sid != 0), - "H5Screate_simple() full_mem_large_ds_sid succeeded"); + tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); - tv_ptr->full_file_large_ds_sid = - H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->full_file_large_ds_sid != FAIL), - "H5Screate_simple() full_file_large_ds_sid succeeded"); + tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded"); tv_ptr->mem_large_ds_sid = 
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_large_ds_sid != FAIL), - "H5Screate_simple() mem_large_ds_sid succeeded"); + VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded"); tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), - "H5Screate_simple() file_large_ds_sid_0 succeeded"); + VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded"); /* used by checker board tests only */ tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), - "H5Screate_simple() file_large_ds_sid_1 succeeded"); + VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded"); - tv_ptr->mem_large_ds_process_slice_sid = - H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL), + tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL), "H5Screate_simple() mem_large_ds_process_slice_sid succeeded"); - tv_ptr->file_large_ds_process_slice_sid = - H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); - VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL), + tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL); + VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL), "H5Screate_simple() file_large_ds_process_slice_sid succeeded"); - - tv_ptr->large_ds_slice_sid = - H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL); - VRFY((tv_ptr->large_ds_slice_sid != 0), - "H5Screate_simple() large_ds_slice_sid succeeded"); - + tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL); + VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded"); /* if chunk edge size is greater than zero, set up the small and * large data set creation property lists to specify chunked * datasets. */ - if ( tv_ptr->chunk_edge_size > 0 ) { + if (tv_ptr->chunk_edge_size > 0) { - /* Under Lustre (and perhaps other parallel file systems?) we get - * locking delays when two or more processes attempt to access the + /* Under Lustre (and perhaps other parallel file systems?) we get + * locking delays when two or more processes attempt to access the * same file system block. * - * To minimize this problem, I have changed chunk_dims[0] + * To minimize this problem, I have changed chunk_dims[0] * from (mpi_size + 1) to just when any sort of express test is - * selected. Given the structure of the test, and assuming we - * set the alignment large enough, this avoids the contention - * issue by seeing to it that each chunk is only accessed by one + * selected. Given the structure of the test, and assuming we + * set the alignment large enough, this avoids the contention + * issue by seeing to it that each chunk is only accessed by one * process. * - * One can argue as to whether this is a good thing to do in our + * One can argue as to whether this is a good thing to do in our * tests, but for now it is necessary if we want the test to complete * in a reasonable amount of time. 
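The hunk just below collapses the old express_test branch, whose two arms both set chunk_dims[0] to 1, and then builds the chunked dataset-creation property lists. Reduced to essentials, the setup is roughly the following, where rank and chunk_edge_size stand in for the struct fields:

    hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
    hid_t   dcpl;

    chunk_dims[0] = 1; /* one process per chunk along the slowest dimension */
    for (int d = 1; d < rank; d++)
        chunk_dims[d] = (hsize_t)chunk_edge_size;

    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, rank, chunk_dims); /* implies H5D_CHUNKED layout */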
* * JRM -- 9/16/10 */ - if ( express_test == 0 ) { - tv_ptr->chunk_dims[0] = 1; + tv_ptr->chunk_dims[0] = 1; - } else { - - tv_ptr->chunk_dims[0] = 1; - } - tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = - tv_ptr->chunk_dims[3] = - tv_ptr->chunk_dims[4] = (hsize_t)(tv_ptr->chunk_edge_size); + tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] = + (hsize_t)(tv_ptr->chunk_edge_size); small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); @@ -426,7 +367,6 @@ hs_dr_pio_test__setup(const int test_num, ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims); VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); - large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); @@ -438,260 +378,185 @@ hs_dr_pio_test__setup(const int test_num, } /* create the small dataset */ - tv_ptr->small_dataset = H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, - tv_ptr->file_small_ds_sid_0, H5P_DEFAULT, - small_ds_dcpl_id, H5P_DEFAULT); + tv_ptr->small_dataset = + H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT, + small_ds_dcpl_id, H5P_DEFAULT); VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded"); /* create the large dataset */ - tv_ptr->large_dataset = H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, - tv_ptr->file_large_ds_sid_0, H5P_DEFAULT, - large_ds_dcpl_id, H5P_DEFAULT); + tv_ptr->large_dataset = + H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT, + large_ds_dcpl_id, H5P_DEFAULT); VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded"); - /* setup xfer property list */ tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - if(use_collective_io) { + if (use_collective_io) { ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); } /* setup selection to write initial data to the small and large data sets */ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } /* setup selections for writing initial data to the small data set */ - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded"); - - if ( MAINPROCESS ) { /* add an additional slice to the selections */ + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, 
set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + + if (MAINPROCESS) { /* add an additional slice to the selections */ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, - H5S_SELECT_OR, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret>= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, - H5S_SELECT_OR, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret>= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) suceeded"); - } + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded"); + } /* write the initial value of the small data set to file */ - ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid, + ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid, tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); - /* sync with the other processes before checking data */ - if ( ! use_collective_io ) { + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes"); - } - - /* read the small data set back to verify that it contains the - * expected data. Note that each process reads in the entire + /* read the small data set back to verify that it contains the + * expected data. Note that each process reads in the entire * data set and verifies it. 
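Two transfer-related details above are easy to miss: the dataset-transfer list opts into collective I/O only on request, and the post-write barrier is now issued unconditionally rather than only in the independent-I/O case. The property-list half in isolation:

    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    if (use_collective_io)
        /* every rank must then take part in each H5Dread/H5Dwrite call */
        H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
    /* otherwise the default H5FD_MPIO_INDEPENDENT mode applies */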
*/ - ret = H5Dread(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->full_mem_small_ds_sid, - tv_ptr->full_file_small_ds_sid, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_1); + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid, + tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); - /* verify that the correct data was written to the small data set */ expected_value = 0; - mis_match = FALSE; - ptr_1 = tv_ptr->small_ds_buf_1; + mis_match = FALSE; + ptr_1 = tv_ptr->small_ds_buf_1; i = 0; - for ( i = 0; i < (int)(tv_ptr->small_ds_size); i++ ) { + for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } ptr_1++; expected_value++; } - VRFY( (mis_match == FALSE), "small ds init data good."); - + VRFY((mis_match == FALSE), "small ds init data good."); /* setup selections for writing initial data to the large data set */ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded"); - - /* In passing, setup the process slice data spaces as well */ - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), - "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), - "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded"); - - if ( MAINPROCESS ) { /* add an additional slice to the selections */ + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, - H5S_SELECT_OR, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret>= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, - H5S_SELECT_OR, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) suceeded"); - } + /* In passing, setup the process slice dataspaces as well */ + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, + tv_ptr->stride, tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded"); - /* write the initial value of the large data set to file */ - ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, - tv_ptr->mem_large_ds_sid, 
tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); - if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr); - VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start, + tv_ptr->stride, tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded"); + if (MAINPROCESS) { /* add an additional slice to the selections */ - /* sync with the other processes before checking data */ - if ( ! use_collective_io ) { + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size); + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes"); + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded"); } + /* write the initial value of the large data set to file */ + ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stderr); + VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); + + /* sync with the other processes before checking data */ + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); - /* read the large data set back to verify that it contains the - * expected data. Note that each process reads in the entire + /* read the large data set back to verify that it contains the + * expected data. Note that each process reads in the entire * data set. */ - ret = H5Dread(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->full_mem_large_ds_sid, - tv_ptr->full_file_large_ds_sid, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_1); + ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid, + tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); - /* verify that the correct data was written to the large data set */ expected_value = 0; - mis_match = FALSE; - ptr_1 = tv_ptr->large_ds_buf_1; + mis_match = FALSE; + ptr_1 = tv_ptr->large_ds_buf_1; i = 0; - for ( i = 0; i < (int)(tv_ptr->large_ds_size); i++ ) { + for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } ptr_1++; expected_value++; } - VRFY( (mis_match == FALSE), "large ds init data good."); - + VRFY((mis_match == FALSE), "large ds init data good."); /* sync with the other processes before changing data */ - - if ( ! use_collective_io ) { - - mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync initial values check"); - } + mrc = MPI_Barrier(MPI_COMM_WORLD); + VRFY((mrc == MPI_SUCCESS), "Sync initial values check"); return; } /* hs_dr_pio_test__setup() */ - /*------------------------------------------------------------------------- - * Function: hs_dr_pio_test__takedown() + * Function: hs_dr_pio_test__takedown() * - * Purpose: Do takedown after tests of I/O to/from hyperslab selections - * of different rank in the parallel case. 
+ * Purpose: Do takedown after tests of I/O to/from hyperslab selections + * of different rank in the parallel case. * - * Return: void + * Return: void * - * Programmer: JRM -- 9/18/09 - * - * Modifications: - * - * None. + * Programmer: JRM -- 9/18/09 * *------------------------------------------------------------------------- */ @@ -699,19 +564,19 @@ hs_dr_pio_test__setup(const int test_num, #define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0 static void -hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr) +hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG +#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG const char *fcnName = "hs_dr_pio_test__takedown()"; -#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */ - int mpi_rank; /* needed by the VRFY macro */ - herr_t ret; /* Generic return value */ +#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */ + int mpi_rank; /* needed by the VRFY macro */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; /* Close property lists */ - if ( tv_ptr->xfer_plist != H5P_DEFAULT ) { + if (tv_ptr->xfer_plist != H5P_DEFAULT) { ret = H5Pclose(tv_ptr->xfer_plist); VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded"); } @@ -773,43 +638,46 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr) /* Free memory buffers */ - if ( tv_ptr->small_ds_buf_0 != NULL ) HDfree(tv_ptr->small_ds_buf_0); - if ( tv_ptr->small_ds_buf_1 != NULL ) HDfree(tv_ptr->small_ds_buf_1); - if ( tv_ptr->small_ds_buf_2 != NULL ) HDfree(tv_ptr->small_ds_buf_2); - if ( tv_ptr->small_ds_slice_buf != NULL ) HDfree(tv_ptr->small_ds_slice_buf); - - if ( tv_ptr->large_ds_buf_0 != NULL ) HDfree(tv_ptr->large_ds_buf_0); - if ( tv_ptr->large_ds_buf_1 != NULL ) HDfree(tv_ptr->large_ds_buf_1); - if ( tv_ptr->large_ds_buf_2 != NULL ) HDfree(tv_ptr->large_ds_buf_2); - if ( tv_ptr->large_ds_slice_buf != NULL ) HDfree(tv_ptr->large_ds_slice_buf); + if (tv_ptr->small_ds_buf_0 != NULL) + HDfree(tv_ptr->small_ds_buf_0); + if (tv_ptr->small_ds_buf_1 != NULL) + HDfree(tv_ptr->small_ds_buf_1); + if (tv_ptr->small_ds_buf_2 != NULL) + HDfree(tv_ptr->small_ds_buf_2); + if (tv_ptr->small_ds_slice_buf != NULL) + HDfree(tv_ptr->small_ds_slice_buf); + + if (tv_ptr->large_ds_buf_0 != NULL) + HDfree(tv_ptr->large_ds_buf_0); + if (tv_ptr->large_ds_buf_1 != NULL) + HDfree(tv_ptr->large_ds_buf_1); + if (tv_ptr->large_ds_buf_2 != NULL) + HDfree(tv_ptr->large_ds_buf_2); + if (tv_ptr->large_ds_slice_buf != NULL) + HDfree(tv_ptr->large_ds_slice_buf); return; } /* hs_dr_pio_test__takedown() */ - /*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__d2m_l2s() - * - * Purpose: Part one of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Function: contig_hs_dr_pio_test__d2m_l2s() * - * In this function, we test this by reading small_rank - 1 - * slices from the on disk large cube, and verifying that the - * data read is correct. Verify that H5S_select_shape_same() - * returns true on the memory and file selections. + * Purpose: Part one of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. 
* - * Return: void + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * Programmer: JRM -- 9/10/11 + * In this function, we test this by reading small_rank - 1 + * slices from the on disk large cube, and verifying that the + * data read is correct. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. * - * Modifications: + * Return: void * - * None. + * Programmer: JRM -- 9/10/11 * *------------------------------------------------------------------------- */ @@ -817,42 +685,41 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr) #define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 static void -contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) +contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG const char *fcnName = "contig_hs_dr_pio_test__run_test()"; #endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - uint32_t expected_value; - uint32_t * ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - - /* We have already done a H5Sselect_all() on the data space - * small_ds_slice_sid in the initialization phase, so no need to + /* We have already done a H5Sselect_all() on the dataspace + * small_ds_slice_sid in the initialization phase, so no need to * call H5Sselect_all() again. */ /* set up start, stride, count, and block -- note that we will * change start[] so as to read slices of the large cube. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } @@ -861,55 +728,53 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) /* zero out the buffer we will be reading into */ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, - "%s reading slices from big cube on disk into small cube slice.\n", - fcnName); +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName); #endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ /* in serial versions of this test, we loop through all the dimensions - * of the large data set. However, in the parallel version, each + * of the large data set. However, in the parallel version, each * process only works with that slice of the large cube indicated - * by its rank -- hence we set the most slowly changing index to - * mpi_rank, and don't itterate over it. 
+ * by its rank -- hence we set the most slowly changing index to + * mpi_rank, and don't iterate over it. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -921,16 +786,16 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ - - (tv_ptr->tests_skipped)++; + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ - } else { /* run the test */ + (tv_ptr->tests_skipped)++; + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ - /* we know that small_rank - 1 >= 1 and that - * large_rank > small_rank by the assertions at the head + /* we know that small_rank - 1 >= 1 and that + * large_rank > small_rank by the assertions at the head * of this function. Thus no need for another inner loop. */ tv_ptr->start[0] = (hsize_t)i; @@ -939,59 +804,43 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start_ptr, - tv_ptr->stride_ptr, - tv_ptr->count_ptr, - tv_ptr->block_ptr); - VRFY((ret != FAIL), - "H5Sselect_hyperslab(file_large_cube_sid) succeeded"); - + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded"); - /* verify that H5S_select_shape_same() reports the two + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. 
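Here the patch trades the private H5S_select_shape_same_test() helper for the public H5Sselect_shape_same() API, as the next hunk shows. The public call is tri-valued, so a careful caller distinguishes "different shape" from "error"; a small sketch:

    #include <stdbool.h>
    #include "hdf5.h"

    /* Returns true iff the two selections have the same shape; a negative
     * (error) return from the library is treated as "not the same". */
    static bool
    selections_shape_match(hid_t sid_a, hid_t sid_b)
    {
        htri_t same = H5Sselect_shape_same(sid_a, sid_b);

        return same > 0; /* htri_t: < 0 error, 0 false, > 0 true */
    }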
*/ - check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); - + check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); /* Read selection from disk */ -#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), - (int)(tv_ptr->start[4])); - HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", - fcnName, +#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); #endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - ret = H5Dread(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_slice_buf); + ret = + H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); - /* verify that expected data is retrieved */ mis_match = FALSE; - ptr_1 = tv_ptr->small_ds_slice_buf; - expected_value = (uint32_t)( - (i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); + ptr_1 = tv_ptr->small_ds_slice_buf; + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); - for ( n = 0; n < tv_ptr->small_ds_slice_size; n++ ) { + for (n = 0; n < tv_ptr->small_ds_slice_size; n++) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } @@ -1002,55 +851,43 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) expected_value++; } - VRFY((mis_match == FALSE), - "small slice read from large ds data good."); + VRFY((mis_match == FALSE), "small slice read from large ds data good."); - (tv_ptr->tests_run)++; + (tv_ptr->tests_run)++; } l++; (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* contig_hs_dr_pio_test__d2m_l2s() */ - 
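Before moving on, the core pairing the function above exercises can be shown in isolation: a lower-rank memory dataspace reading one same-shape slice of a higher-rank file dataset. A minimal sketch with illustrative sizes; dset and buf are hypothetical:

    hsize_t  file_dims[3] = {4, 10, 10};
    hsize_t  mem_dims[2]  = {10, 10};
    hsize_t  start[3]     = {2, 0, 0};   /* the plane at index 2 */
    hsize_t  count[3]     = {1, 10, 10}; /* one 10 x 10 plane    */
    uint32_t buf[100];

    hid_t file_sid = H5Screate_simple(3, file_dims, NULL);
    hid_t mem_sid  = H5Screate_simple(2, mem_dims, NULL); /* fully selected */

    H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL);

    /* the dataspace ranks differ (2 vs 3) but the selected shapes match,
     * so the library accepts the pairing -- the point of these tests */
    H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, H5P_DEFAULT, buf);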
/*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__d2m_s2l() + * Function: contig_hs_dr_pio_test__d2m_s2l() * - * Purpose: Part two of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. + * Purpose: Part two of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. * - * Verify that we can read from disk correctly using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * In this function, we test this by reading slices of the - * on disk small data set into slices through the in memory - * large data set, and verify that the correct data (and - * only the correct data) is read. + * In this function, we test this by reading slices of the + * on disk small data set into slices through the in memory + * large data set, and verify that the correct data (and + * only the correct data) is read. * - * Return: void + * Return: void * - * Programmer: JRM -- 8/10/11 - * - * Modifications: - * - * None. + * Programmer: JRM -- 8/10/11 * *------------------------------------------------------------------------- */ @@ -1058,56 +895,49 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) #define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 static void -contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) +contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()"; #endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - /* Read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct + /* Read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct * data (and only the correct data) is read. 
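The function beginning here inverts the previous pairing: the file side is now this process's slab of the small dataset, and the memory side is a same-shape slice carved out of the much larger buffer. Schematically, with hypothetical names:

    /* memory: a small_rank-shaped slab inside the large rank-N buffer */
    H5Sselect_hyperslab(mem_large_sid, H5S_SELECT_SET, start, stride, count, block);

    /* file: this process's slab of the small dataset, selected earlier;
     * the read scatters the small dataset's values into just that slice */
    H5Dread(small_dset, H5T_NATIVE_UINT32, mem_large_sid, file_small_sid,
            xfer_plist, large_buf);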
*/ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded"); + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); - -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG - HDfprintf(stdout, - "%s reading slices of on disk small data set into slices of big data set.\n", - fcnName); +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); #endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ /* zero out the in memory large ds */ @@ -1116,68 +946,66 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) /* set up start, stride, count, and block -- note that we will * change start[] so as to read slices of the large cube. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } } - /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. + * of the large data set that don't appear in the small data set. * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't itterate + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate * over it. */ - - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. 
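The index-pinning discussed above fits in three lines: PAR_SS_DR_MAX_RANK - large_rank says how many leading loop indices the large dataset does not use, and the first index it does use is frozen at mpi_rank so each process walks only its own slice; the rest start at zero and iterate over edge_size. As a sketch:

    int i = (PAR_SS_DR_MAX_RANK - large_rank == 0) ? mpi_rank : 0;
    int j = (PAR_SS_DR_MAX_RANK - large_rank == 1) ? mpi_rank : 0;
    int k = (PAR_SS_DR_MAX_RANK - large_rank == 2) ? mpi_rank : 0;
    /* l always iterates: large_rank >= 3, so the pinned index is i, j, or k */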
*/ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -1189,11 +1017,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -1207,74 +1035,57 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, - H5S_SELECT_SET, - tv_ptr->start_ptr, - tv_ptr->stride_ptr, - tv_ptr->count_ptr, - tv_ptr->block_ptr); - VRFY((ret != FAIL), - "H5Sselect_hyperslab(mem_large_ds_sid) succeeded"); - + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded"); - /* verify that H5S_select_shape_same() reports the two + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. */ - check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0, - tv_ptr->mem_large_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); - + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); /* Read selection from disk */ -#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), - (int)(tv_ptr->start[4])); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); #endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - ret = H5Dread(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_1); + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); /* verify that the expected data and only the * expected data was read. 
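The verification that follows is stricter than the earlier whole-buffer compares. Because large_ds_buf_1 was zeroed before the read, every element outside the computed [start_index, stop_index] window must still be zero, which proves the small dataset's values landed only where the memory selection put them. In sketch form, with hypothetical names:

    bool     ok       = true;
    uint32_t expected = (uint32_t)((size_t)mpi_rank * small_ds_slice_size);

    for (size_t n = 0; n < large_ds_size; n++) {
        uint32_t want = (n >= start_index && n <= stop_index) ? expected++ : 0;
        if (buf[n] != want)
            ok = false; /* a wrong value inside the window, or a
                         * disturbed zero outside it */
    }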
*/ - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = (uint32_t) - ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - start_index = (size_t)( - (i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - HDassert( start_index < stop_index ); - HDassert( stop_index <= tv_ptr->large_ds_size ); + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->large_ds_size); - for ( n = 0; n < tv_ptr->large_ds_size; n++ ) { + for (n = 0; n < tv_ptr->large_ds_size; n++) { - if ( ( n >= start_index ) && ( n <= stop_index ) ) { + if ((n >= start_index) && (n <= stop_index)) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } expected_value++; + } + else { - } else { - - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { mis_match = TRUE; } @@ -1285,8 +1096,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) ptr_1++; } - VRFY((mis_match == FALSE), - "small slice read from large ds data good."); + VRFY((mis_match == FALSE), "small slice read from large ds data good."); (tv_ptr->tests_run)++; } @@ -1295,47 +1105,36 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* contig_hs_dr_pio_test__d2m_s2l() */ - /*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__m2d_l2s() - * - * Purpose: Part three of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. + * Function: contig_hs_dr_pio_test__m2d_l2s() * - * Verify that we can write from memory to file using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Purpose: Part three of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. * - * Do this by writing small_rank - 1 dimensional slices from - * the in memory large data set to the on disk small cube - * dataset. After each write, read the slice of the small - * dataset back from disk, and verify that it contains - * the expected data. Verify that H5S_select_shape_same() - * returns true on the memory and file selections. 
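
The verification loop above deliberately walks the whole buffer, not just the slice, so that data leaking outside the selection is caught as well. The same check, factored into a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Buffer must hold consecutive values starting at first_expected inside
 * [start_index, stop_index], and untouched zeros everywhere else. */
static bool
slice_data_good(const uint32_t *buf, size_t buf_size, size_t start_index,
                size_t stop_index, uint32_t first_expected)
{
    uint32_t expected = first_expected;
    size_t   n;

    for (n = 0; n < buf_size; n++) {
        if (n >= start_index && n <= stop_index) {
            if (buf[n] != expected++)
                return false;          /* wrong value inside the slice */
        }
        else if (buf[n] != 0) {
            return false;              /* write/read leaked outside the slice */
        }
    }
    return true;
}
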
+ * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * Return: void + * Do this by writing small_rank - 1 dimensional slices from + * the in memory large data set to the on disk small cube + * dataset. After each write, read the slice of the small + * dataset back from disk, and verify that it contains + * the expected data. Verify that H5Sselect_shape_same() + * returns true on the memory and file selections. * - * Programmer: JRM -- 8/10/11 + * Return: void * - * Modifications: - * - * None. + * Programmer: JRM -- 8/10/11 * *------------------------------------------------------------------------- */ @@ -1343,80 +1142,70 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) #define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 static void -contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) +contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()"; #endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - /* now we go in the opposite direction, verifying that we can write * from memory to file using selections of different rank that - * H5S_select_shape_same() views as being of the same shape. + * H5Sselect_shape_same() views as being of the same shape. * - * Start by writing small_rank - 1 dimensional slices from the in memory large - * data set to the on disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify that it contains - * the expected data. Verify that H5S_select_shape_same() returns true on + * Start by writing small_rank - 1 dimensional slices from the in memory large + * data set to the on disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify that it contains + * the expected data. Verify that H5Sselect_shape_same() returns true on * the memory and file selections. 
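
The description above compresses to a three-step round trip per sub-test. A sketch, with every id and buffer a hypothetical placeholder (mem_rd_sid is a same-shape, lower-rank selection used both for the zeroing write and the read-back):

#include "hdf5.h"
#include <stdint.h>

static herr_t
roundtrip_slice(hid_t dset, hid_t mem_wr_sid, hid_t mem_rd_sid,
                hid_t file_sid, hid_t dxpl, const uint32_t *zero_buf,
                const uint32_t *wr_buf, uint32_t *rd_buf)
{
    /* 1: pre-clear the target slice so stale data cannot pass for a
     *    successful write */
    if (H5Dwrite(dset, H5T_NATIVE_UINT32, mem_rd_sid, file_sid, dxpl, zero_buf) < 0)
        return -1;

    /* 2: write through the higher-rank memory selection */
    if (H5Dwrite(dset, H5T_NATIVE_UINT32, mem_wr_sid, file_sid, dxpl, wr_buf) < 0)
        return -1;

    /* 3: read back through the natural-rank selection for verification */
    return H5Dread(dset, H5T_NATIVE_UINT32, mem_rd_sid, file_sid, dxpl, rd_buf);
}
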
*/ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded"); + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); /* set up start, stride, count, and block -- note that we will * change start[] so as to read slices of the large cube. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } @@ -1425,60 +1214,56 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) /* zero out the in memory small ds */ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG - HDfprintf(stdout, - "%s writing slices from big ds to slices of small ds on disk.\n", - fcnName); +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName); #endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. + * of the large data set that don't appear in the small data set. * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't itterate + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate * over it. 
*/ - - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } j = 0; do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -1490,11 +1275,11 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -1504,12 +1289,8 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) */ /* zero out this rank's slice of the on disk small data set */ - ret = H5Dwrite(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_2); + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); /* select the portion of the in memory large cube from which we @@ -1521,88 +1302,66 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, - H5S_SELECT_SET, - tv_ptr->start_ptr, - tv_ptr->stride_ptr, - tv_ptr->count_ptr, - tv_ptr->block_ptr); - VRFY((ret >= 0), - "H5Sselect_hyperslab() mem_large_ds_sid succeeded."); - + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded."); - /* verify that H5S_select_shape_same() reports the in + /* verify that H5Sselect_shape_same() reports the in * memory slice through the cube selection and the * on disk full square selections as having the same shape. */ - check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0, - tv_ptr->mem_large_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed."); + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed."); - - /* write the slice from the in memory large data set to the + /* write the slice from the in memory large data set to the * slice of the on disk small dataset. 
*/ -#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), - (int)(tv_ptr->start[4])); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); #endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - ret = H5Dwrite(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_0); + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); - /* read the on disk square into memory */ - ret = H5Dread(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_1); + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - /* verify that expected data is retrieved */ mis_match = FALSE; - ptr_1 = tv_ptr->small_ds_buf_1; + ptr_1 = tv_ptr->small_ds_buf_1; - expected_value = (uint32_t)( - (i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - HDassert( start_index < stop_index ); - HDassert( stop_index <= tv_ptr->small_ds_size ); + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->small_ds_size); - for ( n = 0; n < tv_ptr->small_ds_size; n++ ) { + for (n = 0; n < tv_ptr->small_ds_size; n++) { - if ( ( n >= start_index ) && ( n <= stop_index ) ) { + if ((n >= start_index) && (n <= stop_index)) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } expected_value++; + } + else { - } else { - - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { mis_match = TRUE; } @@ -1613,59 +1372,47 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) ptr_1++; } - VRFY((mis_match == FALSE), - "small slice write from large ds data good."); + VRFY((mis_match == FALSE), "small slice write from large ds data good."); (tv_ptr->tests_run)++; } l++; - (tv_ptr->total_tests)++; + (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 
) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* contig_hs_dr_pio_test__m2d_l2s() */ - /*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__m2d_s2l() - * - * Purpose: Part four of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can write from memory to file using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Function: contig_hs_dr_pio_test__m2d_s2l() * - * Do this by writing the contents of the process's slice of - * the in memory small data set to slices of the on disk - * large data set. After each write, read the process's - * slice of the large data set back into memory, and verify - * that it contains the expected data. + * Purpose: Part four of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. * - * Verify that H5S_select_shape_same() returns true on the - * memory and file selections. + * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * Return: void + * Do this by writing the contents of the process's slice of + * the in memory small data set to slices of the on disk + * large data set. After each write, read the process's + * slice of the large data set back into memory, and verify + * that it contains the expected data. * - * Programmer: JRM -- 8/10/11 + * Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. 
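
For orientation, the phase names encode a direction and a size relation: "d2m" is disk to memory (a read), "m2d" is memory to disk (a write), while "l2s" moves data from the larger-rank dataset to the smaller one and "s2l" the reverse. contig_hs_dr_pio_test__m2d_s2l(), documented here, is therefore the final quadrant: the in-memory small dataset written out to slices of the on-disk large dataset.
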
* - * Modifications: + * Return: void * - * None + * Programmer: JRM -- 8/10/11 * *------------------------------------------------------------------------- */ @@ -1673,72 +1420,67 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) #define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 static void -contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) +contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()"; #endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t n; - int mpi_rank; /* needed by the VRFY macro */ - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t mis_match = FALSE; + int i, j, k, l; + size_t n; + int mpi_rank; /* needed by the VRFY macro */ + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5S_select_shape_same() returns true on the memory + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory * and file selections. */ - /* select the slice of the in memory small data set associated with + /* select the slice of the in memory small data set associated with * the process's mpi rank. */ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded"); - + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); /* set up start, stride, count, and block -- note that we will * change start[] so as to write slices of the small data set to * slices of the large data set. 
*/ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } @@ -1747,48 +1489,46 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) /* zero out the in memory large ds */ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - HDfprintf(stdout, - "%s writing process slices of small ds to slices of large ds on disk.\n", - fcnName); +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName); #endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. 
Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -1800,29 +1540,26 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG tv_ptr->start[0] = (hsize_t)i; tv_ptr->start[1] = (hsize_t)j; tv_ptr->start[2] = (hsize_t)k; tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - HDfprintf(stdout, - "%s:%d: skipping test with start = %d %d %d %d %d.\n", - fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), - (int)(tv_ptr->start[4])); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, + HDfprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName, + (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), + (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); #endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -1835,14 +1572,10 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) * Note that this will leave one slice with its original data * as there is one more slice than processes. */ - ret = H5Dwrite(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->large_ds_slice_sid, - tv_ptr->file_large_ds_process_slice_sid, - tv_ptr->xfer_plist, + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid, + tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2); - VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded"); - + VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); /* select the portion of the in memory large cube to which we * are going to write data. @@ -1853,97 +1586,72 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start_ptr, - tv_ptr->stride_ptr, - tv_ptr->count_ptr, - tv_ptr->block_ptr); - VRFY((ret != FAIL), - "H5Sselect_hyperslab() target large ds slice succeeded"); - + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr, + tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr); + VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded"); - /* verify that H5S_select_shape_same() reports the in + /* verify that H5Sselect_shape_same() reports the in * memory small data set slice selection and the * on disk slice through the large data set selection * as having the same shape. 
*/ - check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_0); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); + check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); - - /* write the small data set slice from memory to the - * target slice of the disk data set + /* write the small data set slice from memory to the + * target slice of the disk data set */ -#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, (int)(tv_ptr->mpi_rank), - (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), - (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), - (int)(tv_ptr->start[4])); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank), + (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]), + (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4])); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); #endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - ret = H5Dwrite(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_0); - VRFY((ret != FAIL), - "H5Dwrite of small ds slice to large ds succeeded"); - + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); + VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); - /* read this processes slice on the on disk large + /* read this processes slice on the on disk large * data set into memory. */ - ret = H5Dread(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_process_slice_sid, - tv_ptr->file_large_ds_process_slice_sid, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_1); - VRFY((ret != FAIL), - "H5Dread() of process slice of large ds succeeded"); - + ret = H5Dread( + tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid, + tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); /* verify that the expected data and only the * expected data was read. 
*/ - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = (uint32_t) - ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - - start_index = (size_t) - ((i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - HDassert( start_index < stop_index ); - HDassert( stop_index < tv_ptr->large_ds_size ); + HDassert(start_index < stop_index); + HDassert(stop_index < tv_ptr->large_ds_size); - for ( n = 0; n < tv_ptr->large_ds_size; n++ ) { + for (n = 0; n < tv_ptr->large_ds_size; n++) { - if ( ( n >= start_index ) && ( n <= stop_index ) ) { + if ((n >= start_index) && (n <= stop_index)) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } expected_value++; + } + else { - } else { - - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { mis_match = TRUE; } @@ -1953,8 +1661,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) ptr_1++; } - VRFY((mis_match == FALSE), - "small ds slice write to large ds slice data good."); + VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good."); (tv_ptr->tests_run)++; } @@ -1963,47 +1670,25 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* contig_hs_dr_pio_test__m2d_s2l() */ - /*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test__run_test() - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel. - * - * Return: void + * Function: contig_hs_dr_pio_test__run_test() * - * Programmer: JRM -- 9/18/09 + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel. * - * Modifications: + * Return: void * - * JRM -- 9/16/10 - * Added express_test parameter. Use it to control whether - * we set up the chunks so that no chunk is shared between - * processes, and also whether we set an alignment when we - * create the test file. - * - * JRM -- 8/11/11 - * Refactored function heavily & broke it into six functions. - * Added the skips_ptr, max_skips, total_tests_ptr, - * tests_run_ptr, and tests_skiped_ptr parameters to support - * skipping portions of the test according to the express - * test value. 
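
The skips/max_skips bookkeeping described in the old header is a simple counting throttle: max_skips sub-tests are skipped for every one that runs, and every candidate is tallied in total_tests. A sketch of that pattern (names mirror the test's fields):

#include <stdbool.h>
#include <stdint.h>

static bool
run_this_subtest(int *skips, int max_skips, int64_t *tests_run,
                 int64_t *tests_skipped, int64_t *total_tests)
{
    bool run = false;

    if ((*skips)++ < max_skips) {
        (*tests_skipped)++;        /* throttled: skip this sub-test */
    }
    else {
        *skips = 0;                /* reset the counter and run this one */
        (*tests_run)++;
        run = true;
    }
    (*total_tests)++;

    return run;
}
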
+ * Programmer: JRM -- 9/18/09 * *------------------------------------------------------------------------- */ @@ -2011,27 +1696,16 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) #define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 static void -contig_hs_dr_pio_test__run_test(const int test_num, - const int edge_size, - const int chunk_edge_size, - const int small_rank, - const int large_rank, - const hbool_t use_collective_io, - const hid_t dset_type, - int express_test, - int * skips_ptr, - int max_skips, - int64_t * total_tests_ptr, - int64_t * tests_run_ptr, - int64_t * tests_skipped_ptr) +contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size, + const int small_rank, const int large_rank, const hbool_t use_collective_io, + const hid_t dset_type, int express_test, int *skips_ptr, int max_skips, + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr) { -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG const char *fcnName = "contig_hs_dr_pio_test__run_test()"; #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - int mpi_rank; - struct hs_dr_pio_test_vars_t test_vars = - { - /* int mpi_size = */ -1, + struct hs_dr_pio_test_vars_t test_vars = { + /* int mpi_size = */ -1, /* int mpi_rank = */ -1, /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, /* MPI_Inf mpi_info = */ MPI_INFO_NULL, @@ -2047,12 +1721,12 @@ contig_hs_dr_pio_test__run_test(const int test_num, /* uint32_t * small_ds_buf_2 = */ NULL, /* uint32_t * small_ds_slice_buf = */ NULL, /* uint32_t * large_ds_buf_0 = */ NULL, - /* uint32_t * large_ds_buf_1 = */ NULL, + /* uint32_t * large_ds_buf_1 = */ NULL, /* uint32_t * large_ds_buf_2 = */ NULL, /* uint32_t * large_ds_slice_buf = */ NULL, /* int small_ds_offset = */ -1, /* int large_ds_offset = */ -1, - /* hid_t fid = */ -1, /* HDF5 file ID */ + /* hid_t fid = */ -1, /* HDF5 file ID */ /* hid_t xfer_plist = */ H5P_DEFAULT, /* hid_t full_mem_small_ds_sid = */ -1, /* hid_t full_file_small_ds_sid = */ -1, @@ -2068,126 +1742,116 @@ contig_hs_dr_pio_test__run_test(const int test_num, /* hid_t file_large_ds_process_slice_sid = */ -1, /* hid_t mem_large_ds_process_slice_sid = */ -1, /* hid_t large_ds_slice_sid = */ -1, - /* hid_t small_dataset = */ -1, /* Dataset ID */ - /* hid_t large_dataset = */ -1, /* Dataset ID */ + /* hid_t small_dataset = */ -1, /* Dataset ID */ + /* hid_t large_dataset = */ -1, /* Dataset ID */ /* size_t small_ds_size = */ 1, /* size_t small_ds_slice_size = */ 1, /* size_t large_ds_size = */ 1, /* size_t large_ds_slice_size = */ 1, - /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, + /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, /* hsize_t * start_ptr = */ NULL, /* hsize_t * stride_ptr = */ NULL, /* hsize_t * count_ptr = */ NULL, /* hsize_t * block_ptr = */ NULL, - /* int skips = */ 0, - /* int max_skips = */ 0, + /* int skips = */ 0, + /* 
int max_skips = */ 0, /* int64_t total_tests = */ 0, /* int64_t tests_run = */ 0, - /* int64_t tests_skipped = */ 0 - }; - struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars; + /* int64_t tests_skipped = */ 0}; + struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; - hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, - small_rank, large_rank, use_collective_io, + hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io, dset_type, express_test, tv_ptr); - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; - /* initialize skips & max_skips */ - tv_ptr->skips = *skips_ptr; + tv_ptr->skips = *skips_ptr; tv_ptr->max_skips = max_skips; -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { - HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", - test_num, small_rank, large_rank); +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); HDfprintf(stdout, "test %d: Initialization complete.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ /* first, verify that we can read from disk correctly using selections - * of different rank that H5S_select_shape_same() views as being of the + * of different rank that H5Sselect_shape_same() views as being of the * same shape. * - * Start by reading small_rank - 1 dimensional slice from the on disk - * large cube, and verifying that the data read is correct. Verify that - * H5S_select_shape_same() returns true on the memory and file selections. + * Start by reading small_rank - 1 dimensional slice from the on disk + * large cube, and verifying that the data read is correct. Verify that + * H5Sselect_shape_same() returns true on the memory and file selections. */ -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ contig_hs_dr_pio_test__d2m_l2s(tv_ptr); - - /* Second, read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct + /* Second, read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct * data (and only the correct data) is read. */ -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ contig_hs_dr_pio_test__d2m_s2l(tv_ptr); - /* now we go in the opposite direction, verifying that we can write * from memory to file using selections of different rank that - * H5S_select_shape_same() views as being of the same shape. + * H5Sselect_shape_same() views as being of the same shape. * * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify that it contains - * the expected data. Verify that H5S_select_shape_same() returns true on + * set to the on disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify that it contains + * the expected data. 
Verify that H5Sselect_shape_same() returns true on * the memory and file selections. */ -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ contig_hs_dr_pio_test__m2d_l2s(tv_ptr); - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5S_select_shape_same() returns true on the memory + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory * and file selections. */ -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ contig_hs_dr_pio_test__m2d_s2l(tv_ptr); -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { - HDfprintf(stdout, - "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", - test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), - (long long)(tv_ptr->total_tests)); +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", + test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), + (long long)(tv_ptr->total_tests)); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ hs_dr_pio_test__takedown(tv_ptr); -#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { +#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG + if (MAINPROCESS) { HDfprintf(stdout, "test %d: Takedown complete.\n", test_num); } #endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ @@ -2201,61 +1865,46 @@ contig_hs_dr_pio_test__run_test(const int test_num, } /* contig_hs_dr_pio_test__run_test() */ - /*------------------------------------------------------------------------- - * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel case. - * - * Return: void - * - * Programmer: JRM -- 9/18/09 + * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) * - * Modifications: + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel case. * - * Modified function to take a sample of the run times - * of the different tests, and skip some of them if - * run times are too long. + * Return: void * - * We need to do this because Lustre runns very slowly - * if two or more processes are banging on the same - * block of memory. - * JRM -- 9/10/10 - * Break this one big test into 4 smaller tests according - * to {independent,collective}x{contigous,chunked} datasets. 
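
Before consulting the skip table, the function below makes all ranks agree on a single express-test level, since a mismatched skip schedule would desynchronize the collective operations. A sketch of that handshake (MPI assumed initialized; note this version clamps the agreed value, whereas the test indexes the table with the local one):

#include <mpi.h>

static int
agreed_max_skips(int local_express_test)
{
    static const int max_skips_tbl[4] = {0, 4, 64, 1024};
    int              express_test     = 0;

    /* take the maximum level over all ranks so everyone skips alike */
    MPI_Allreduce(&local_express_test, &express_test, 1, MPI_INT,
                  MPI_MAX, MPI_COMM_WORLD);

    if (express_test < 0)
        express_test = 0;
    else if (express_test > 3)
        express_test = 3;

    return max_skips_tbl[express_test];
}
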
- * AKC -- 2010/01/14 + * Programmer: JRM -- 9/18/09 * *------------------------------------------------------------------------- */ #define CONTIG_HS_DR_PIO_TEST__DEBUG 0 -void +static void contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) { - int express_test; - int local_express_test; - int mpi_rank = -1; - int mpi_size; - int test_num = 0; - int edge_size; - int chunk_edge_size = 0; - int small_rank; - int large_rank; - int mpi_result; - int skips = 0; - int max_skips = 0; - /* The following table list the number of sub-tests skipped between - * each test that is actually executed as a function of the express + int express_test; + int local_express_test; + int mpi_rank = -1; + int mpi_size; + int test_num = 0; + int edge_size; + int chunk_edge_size = 0; + int small_rank; + int large_rank; + int mpi_result; + int skips = 0; + int max_skips = 0; + /* The following table list the number of sub-tests skipped between + * each test that is actually executed as a function of the express * test level. Note that any value in excess of 4880 will cause all * sub tests to be skipped. */ - int max_skips_tbl[4] = {0, 4, 64, 1024}; - hid_t dset_type = H5T_NATIVE_UINT; - int64_t total_tests = 0; - int64_t tests_run = 0; - int64_t tests_skipped = 0; + int max_skips_tbl[4] = {0, 4, 64, 1024}; + hid_t dset_type = H5T_NATIVE_UINT; + int64_t total_tests = 0; + int64_t tests_run = 0; + int64_t tests_skipped = 0; HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); @@ -2266,45 +1915,33 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) local_express_test = GetTestExpress(); - mpi_result = MPI_Allreduce((void *)&local_express_test, - (void *)&express_test, - 1, - MPI_INT, - MPI_MAX, + mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS ), "MPI_Allreduce(0) succeeded"); + VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); - if ( local_express_test < 0 ) { + if (local_express_test < 0) { max_skips = max_skips_tbl[0]; - } else if ( local_express_test > 3 ) { + } + else if (local_express_test > 3) { max_skips = max_skips_tbl[3]; - } else { + } + else { max_skips = max_skips_tbl[local_express_test]; } - for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) { + for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { - for ( small_rank = 2; small_rank < large_rank; small_rank++ ) { + for (small_rank = 2; small_rank < large_rank; small_rank++) { - switch(sstest_type){ + switch (sstest_type) { case IND_CONTIG: /* contiguous data set, independent I/O */ chunk_edge_size = 0; - contig_hs_dr_pio_test__run_test(test_num, - edge_size, - chunk_edge_size, - small_rank, - large_rank, - FALSE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, - &tests_skipped); + contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, + large_rank, FALSE, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case IND_CONTIG */ @@ -2313,19 +1950,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* contiguous data set, collective I/O */ chunk_edge_size = 0; - contig_hs_dr_pio_test__run_test(test_num, - edge_size, - chunk_edge_size, - small_rank, - large_rank, - TRUE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, - &tests_skipped); + contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, + 
large_rank, TRUE, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case COL_CONTIG */ @@ -2334,19 +1961,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* chunked data set, independent I/O */ chunk_edge_size = 5; - contig_hs_dr_pio_test__run_test(test_num, - edge_size, - chunk_edge_size, - small_rank, - large_rank, - FALSE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, - &tests_skipped); + contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, + large_rank, FALSE, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case IND_CHUNKED */ @@ -2355,19 +1972,9 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) /* chunked data set, collective I/O */ chunk_edge_size = 5; - contig_hs_dr_pio_test__run_test(test_num, - edge_size, - chunk_edge_size, - small_rank, - large_rank, - TRUE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, - &tests_skipped); + contig_hs_dr_pio_test__run_test(test_num, edge_size, chunk_edge_size, small_rank, + large_rank, TRUE, dset_type, express_test, &skips, + max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case COL_CHUNKED */ @@ -2378,16 +1985,16 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) } /* end of switch(sstest_type) */ #if CONTIG_HS_DR_PIO_TEST__DEBUG - if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) { - HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", - tests_run, tests_skipped, total_tests); + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped, + total_tests); } #endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */ } } - if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) { - HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", tests_skipped, total_tests); } @@ -2395,81 +2002,74 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type) } /* contig_hs_dr_pio_test() */ - /**************************************************************** ** -** ckrbrd_hs_dr_pio_test__slct_ckrbrd(): -** Given a data space of tgt_rank, and dimensions: +** ckrbrd_hs_dr_pio_test__slct_ckrbrd(): +** Given a dataspace of tgt_rank, and dimensions: ** -** (mpi_size + 1), edge_size, ... , edge_size +** (mpi_size + 1), edge_size, ... , edge_size ** -** edge_size, and a checker_edge_size, select a checker -** board selection of a sel_rank (sel_rank < tgt_rank) -** dimensional slice through the data space parallel to the -** sel_rank fastest changing indicies, with origin (in the -** higher indicies) as indicated by the start array. +** edge_size, and a checker_edge_size, select a checker +** board selection of a sel_rank (sel_rank < tgt_rank) +** dimensional slice through the dataspace parallel to the +** sel_rank fastest changing indices, with origin (in the +** higher indices) as indicated by the start array. ** -** Note that this function, like all its relatives, is -** hard coded to presume a maximum data space rank of 5. -** While this maximum is declared as a constant, increasing -** it will require extensive coding in addition to changing +** Note that this function, like all its relatives, is +** hard coded to presume a maximum dataspace rank of 5. 
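
To make the selection arithmetic below concrete: with edge_size = 10 and checker_edge_size = 3, one checker period is 2 * 3 = 6, so base_count = ceil(10 / 6) = 2 (checkers starting at offsets 0 and 6) and offset_count = ceil((10 - 3) / 6) = 2 (checkers starting at offsets 3 and 9, the last of which overshoots the extent and is trimmed by the closing H5S_SELECT_AND). The numbers are illustrative only.
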
+** While this maximum is declared as a constant, increasing +** it will require extensive coding in addition to changing ** the value of the constant. ** -** JRM -- 10/8/09 +** JRM -- 10/8/09 ** ****************************************************************/ #define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, - const hid_t tgt_sid, - const int tgt_rank, - const int edge_size, - const int checker_edge_size, - const int sel_rank, +ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, + const int edge_size, const int checker_edge_size, const int sel_rank, hsize_t sel_start[]) { -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():"; -#endif - hbool_t first_selection = TRUE; - int i, j, k, l, m; - int n_cube_offset; - int sel_offset; - const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */ - /* this changes */ - hsize_t base_count; - hsize_t offset_count; - hsize_t start[PAR_SS_DR_MAX_RANK]; - hsize_t stride[PAR_SS_DR_MAX_RANK]; - hsize_t count[PAR_SS_DR_MAX_RANK]; - hsize_t block[PAR_SS_DR_MAX_RANK]; - herr_t ret; /* Generic return value */ - - HDassert( edge_size >= 6 ); - HDassert( 0 < checker_edge_size ); - HDassert( checker_edge_size <= edge_size ); - HDassert( 0 < sel_rank ); - HDassert( sel_rank <= tgt_rank ); - HDassert( tgt_rank <= test_max_rank ); - HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK ); +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():"; +#endif + hbool_t first_selection = TRUE; + int i, j, k, l, m; + int n_cube_offset; + int sel_offset; + const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */ + /* this changes */ + hsize_t base_count; + hsize_t offset_count; + hsize_t start[PAR_SS_DR_MAX_RANK]; + hsize_t stride[PAR_SS_DR_MAX_RANK]; + hsize_t count[PAR_SS_DR_MAX_RANK]; + hsize_t block[PAR_SS_DR_MAX_RANK]; + herr_t ret; /* Generic return value */ + + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(0 < sel_rank); + HDassert(sel_rank <= tgt_rank); + HDassert(tgt_rank <= test_max_rank); + HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK); sel_offset = test_max_rank - sel_rank; - HDassert( sel_offset >= 0 ); + HDassert(sel_offset >= 0); n_cube_offset = test_max_rank - tgt_rank; - HDassert( n_cube_offset >= 0 ); - HDassert( n_cube_offset <= sel_offset ); - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", - fcnName, mpi_rank, edge_size, checker_edge_size); - HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", - fcnName, mpi_rank, sel_rank, sel_offset); - HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", - fcnName, mpi_rank, tgt_rank, n_cube_offset); -#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ + HDassert(n_cube_offset >= 0); + HDassert(n_cube_offset <= sel_offset); + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size, + checker_edge_size); + HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); + HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset); +#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ /* 
First, compute the base count (which assumes start == 0 * for the associated offset) and offset_count (which @@ -2486,234 +2086,204 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, base_count = (hsize_t)(edge_size / (checker_edge_size * 2)); - if ( (edge_size % (checker_edge_size * 2)) > 0 ) { + if ((edge_size % (checker_edge_size * 2)) > 0) { base_count++; } offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2)); - if ( ((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0 ) { + if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) { offset_count++; } /* Now set up the stride and block arrays, and portions of the start - * and count arrays that will not be altered during the selection of + * and count arrays that will not be altered during the selection of * the checker board. */ i = 0; - while ( i < n_cube_offset ) { + while (i < n_cube_offset) { /* these values should never be used */ - start[i] = 0; + start[i] = 0; stride[i] = 0; - count[i] = 0; - block[i] = 0; + count[i] = 0; + block[i] = 0; i++; } - while ( i < sel_offset ) { + while (i < sel_offset) { - start[i] = sel_start[i]; + start[i] = sel_start[i]; stride[i] = (hsize_t)(2 * edge_size); - count[i] = 1; - block[i] = 1; + count[i] = 1; + block[i] = 1; i++; } - while ( i < test_max_rank ) { + while (i < test_max_rank) { stride[i] = (hsize_t)(2 * checker_edge_size); - block[i] = (hsize_t)checker_edge_size; + block[i] = (hsize_t)checker_edge_size; i++; } - + i = 0; do { - if ( 0 >= sel_offset ) { + if (0 >= sel_offset) { - if ( i == 0 ) { + if (i == 0) { start[0] = 0; count[0] = base_count; - - } else { + } + else { start[0] = (hsize_t)checker_edge_size; count[0] = offset_count; - } } j = 0; - do { - if ( 1 >= sel_offset ) { + do { + if (1 >= sel_offset) { - if ( j == 0 ) { + if (j == 0) { start[1] = 0; count[1] = base_count; - - } else { + } + else { start[1] = (hsize_t)checker_edge_size; count[1] = offset_count; - } } k = 0; do { - if ( 2 >= sel_offset ) { + if (2 >= sel_offset) { - if ( k == 0 ) { + if (k == 0) { start[2] = 0; count[2] = base_count; - - } else { + } + else { start[2] = (hsize_t)checker_edge_size; count[2] = offset_count; - } } l = 0; do { - if ( 3 >= sel_offset ) { + if (3 >= sel_offset) { - if ( l == 0 ) { + if (l == 0) { start[3] = 0; count[3] = base_count; - - } else { + } + else { start[3] = (hsize_t)checker_edge_size; count[3] = offset_count; - } } m = 0; do { - if ( 4 >= sel_offset ) { + if (4 >= sel_offset) { - if ( m == 0 ) { + if (m == 0) { start[4] = 0; count[4] = base_count; - - } else { + } + else { start[4] = (hsize_t)checker_edge_size; count[4] = offset_count; - } } - if ( ((i + j + k + l + m) % 2) == 0 ) { - -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", - fcnName, mpi_rank, (int)first_selection); - HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", - fcnName, mpi_rank, i, j, k, l, m); - HDfprintf(stdout, - "%s:%d: start = %d %d %d %d %d.\n", - fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - HDfprintf(stdout, - "%s:%d: stride = %d %d %d %d %d.\n", - fcnName, mpi_rank, (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], (int)stride[4]); - HDfprintf(stdout, - "%s:%d: count = %d %d %d %d %d.\n", - fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - HDfprintf(stdout, - "%s:%d: block = %d %d %d %d %d.\n", - fcnName, mpi_rank, (int)block[0], (int)block[1], - 
(int)block[2], (int)block[3], (int)block[4]); - HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", - fcnName, mpi_rank, + if (((i + j + k + l + m) % 2) == 0) { + +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, + (int)first_selection); + HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j, + k, l, m); + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)start[0], (int)start[1], (int)start[2], (int)start[3], + (int)start[4]); + HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], + (int)stride[4]); + HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)count[0], (int)count[1], (int)count[2], (int)count[3], + (int)count[4]); + HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)block[0], (int)block[1], (int)block[2], (int)block[3], + (int)block[4]); + HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, H5Sget_simple_extent_ndims(tgt_sid)); - HDfprintf(stdout, "%s:%d: selection rank = %d.\n", - fcnName, mpi_rank, sel_rank); + HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank); #endif - if ( first_selection ) { + if (first_selection) { + + first_selection = FALSE; - first_selection = FALSE; + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]), + &(stride[n_cube_offset]), &(count[n_cube_offset]), + &(block[n_cube_offset])); - ret = H5Sselect_hyperslab - ( - tgt_sid, - H5S_SELECT_SET, - &(start[n_cube_offset]), - &(stride[n_cube_offset]), - &(count[n_cube_offset]), - &(block[n_cube_offset]) - ); - VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); + } + else { - } else { - - ret = H5Sselect_hyperslab - ( - tgt_sid, - H5S_SELECT_OR, - &(start[n_cube_offset]), - &(stride[n_cube_offset]), - &(count[n_cube_offset]), - &(block[n_cube_offset]) - ); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]), + &(stride[n_cube_offset]), &(count[n_cube_offset]), + &(block[n_cube_offset])); + VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); } } m++; - } while ( ( m <= 1 ) && - ( 4 >= sel_offset ) ); + } while ((m <= 1) && (4 >= sel_offset)); l++; - } while ( ( l <= 1 ) && - ( 3 >= sel_offset ) ); + } while ((l <= 1) && (3 >= sel_offset)); k++; - } while ( ( k <= 1 ) && - ( 2 >= sel_offset ) ); + } while ((k <= 1) && (2 >= sel_offset)); j++; - } while ( ( j <= 1 ) && - ( 1 >= sel_offset ) ); - + } while ((j <= 1) && (1 >= sel_offset)); i++; - } while ( ( i <= 1 ) && - ( 0 >= sel_offset ) ); + } while ((i <= 1) && (0 >= sel_offset)); -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", - fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid)); +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); #endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ - /* Clip the selection back to the data space proper. */ + /* Clip the selection back to the dataspace proper. 
*/ - for ( i = 0; i < test_max_rank; i++ ) { + for (i = 0; i < test_max_rank; i++) { start[i] = 0; stride[i] = (hsize_t)edge_size; @@ -2721,14 +2291,13 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, block[i] = (hsize_t)edge_size; } - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, - start, stride, count, block); + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); -#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG - HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", - fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid)); +#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); #endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */ @@ -2736,96 +2305,92 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, } /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */ - /**************************************************************** ** -** ckrbrd_hs_dr_pio_test__verify_data(): +** ckrbrd_hs_dr_pio_test__verify_data(): ** -** Examine the supplied buffer to see if it contains the -** expected data. Return TRUE if it does, and FALSE +** Examine the supplied buffer to see if it contains the +** expected data. Return TRUE if it does, and FALSE ** otherwise. ** -** The supplied buffer is presumed to this process's slice -** of the target data set. Each such slice will be an -** n-cube of rank (rank -1) and the supplied edge_size with -** origin (mpi_rank, 0, ... , 0) in the target data set. +** The supplied buffer is presumed to this process's slice +** of the target data set. Each such slice will be an +** n-cube of rank (rank -1) and the supplied edge_size with +** origin (mpi_rank, 0, ... , 0) in the target data set. ** -** Further, the buffer is presumed to be the result of reading -** or writing a checker board selection of an m (1 <= m < +** Further, the buffer is presumed to be the result of reading +** or writing a checker board selection of an m (1 <= m < ** rank) dimensional slice through this processes slice -** of the target data set. Also, this slice must be parallel -** to the fastest changing indicies. +** of the target data set. Also, this slice must be parallel +** to the fastest changing indices. ** -** It is further presumed that the buffer was zeroed before -** the read/write, and that the full target data set (i.e. -** the buffer/data set for all processes) was initialized -** with the natural numbers listed in order from the origin -** along the fastest changing axis. +** It is further presumed that the buffer was zeroed before +** the read/write, and that the full target data set (i.e. +** the buffer/data set for all processes) was initialized +** with the natural numbers listed in order from the origin +** along the fastest changing axis. 
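The initialization scheme just described is easy to state in code. The following is a minimal sketch for cross-checking expected values; it is not part of the patch, and the helper name is invented:

#include <stdint.h>

/* Illustrative only -- not from the patch.  For a D0 x D1 x D2 dataset
 * initialized with the natural numbers along the fastest changing
 * axis, the value stored at (x, y, z) is: */
static uint32_t
natural_value_3d(uint32_t x, uint32_t y, uint32_t z,
                 uint32_t d1, uint32_t d2)
{
    return (d1 * d2 * x) + (d2 * y) + z;
}

/* For the 20 x 10 x 10 example below, natural_value_3d(10, 0, 0, 10, 10)
 * yields 1000 -- the first value in process 10's slice. */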
 **
 ** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
 **
-** (10 * 10 * x) + (10 * y) + z
+** (10 * 10 * x) + (10 * y) + z
 **
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
-** to the checkerboard selection) as follows:
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
 **
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
 **
-** In the case of a read from the processors slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
+** In the case of a read from the processor's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
 **
-** Finally, the function presumes that the first element
-** of the buffer resides either at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
** ****************************************************************/ #define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0 static hbool_t -ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr, - const int rank, - const int edge_size, - const int checker_edge_size, - uint32_t first_expected_val, +ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size, + const int checker_edge_size, uint32_t first_expected_val, hbool_t buf_starts_in_checker) { #if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():"; + const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():"; #endif - hbool_t good_data = TRUE; - hbool_t in_checker; - hbool_t start_in_checker[5]; - uint32_t expected_value; - uint32_t * val_ptr; - int i, j, k, l, m; /* to track position in n-cube */ - int v, w, x, y, z; /* to track position in checker */ + hbool_t good_data = TRUE; + hbool_t in_checker; + hbool_t start_in_checker[5]; + uint32_t expected_value; + uint32_t *val_ptr; + int i, j, k, l, m; /* to track position in n-cube */ + int v, w, x, y, z; /* to track position in checker */ const int test_max_rank = 5; /* code changes needed if this is increased */ - HDassert( buf_ptr != NULL ); - HDassert( 0 < rank ); - HDassert( rank <= test_max_rank ); - HDassert( edge_size >= 6 ); - HDassert( 0 < checker_edge_size ); - HDassert( checker_edge_size <= edge_size ); - HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK ); + HDassert(buf_ptr != NULL); + HDassert(0 < rank); + HDassert(rank <= test_max_rank); + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK); -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - int mpi_rank; + int mpi_rank; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); @@ -2837,144 +2402,128 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr, } #endif - val_ptr = buf_ptr; - expected_value = first_expected_val; +val_ptr = buf_ptr; +expected_value = first_expected_val; - i = 0; - v = 0; - start_in_checker[0] = buf_starts_in_checker; - do - { - if ( v >= checker_edge_size ) { +i = 0; +v = 0; +start_in_checker[0] = buf_starts_in_checker; +do { + if (v >= checker_edge_size) { - start_in_checker[0] = ! start_in_checker[0]; - v = 0; + start_in_checker[0] = !start_in_checker[0]; + v = 0; + } + + j = 0; + w = 0; + start_in_checker[1] = start_in_checker[0]; + do { + if (w >= checker_edge_size) { + + start_in_checker[1] = !start_in_checker[1]; + w = 0; } - j = 0; - w = 0; - start_in_checker[1] = start_in_checker[0]; - do - { - if ( w >= checker_edge_size ) { - - start_in_checker[1] = ! start_in_checker[1]; - w = 0; + k = 0; + x = 0; + start_in_checker[2] = start_in_checker[1]; + do { + if (x >= checker_edge_size) { + + start_in_checker[2] = !start_in_checker[2]; + x = 0; } - k = 0; - x = 0; - start_in_checker[2] = start_in_checker[1]; - do - { - if ( x >= checker_edge_size ) { - - start_in_checker[2] = ! start_in_checker[2]; - x = 0; - } + l = 0; + y = 0; + start_in_checker[3] = start_in_checker[2]; + do { + if (y >= checker_edge_size) { - l = 0; - y = 0; - start_in_checker[3] = start_in_checker[2]; - do - { - if ( y >= checker_edge_size ) { - - start_in_checker[3] = ! 
start_in_checker[3]; - y = 0; - } + start_in_checker[3] = !start_in_checker[3]; + y = 0; + } - m = 0; - z = 0; -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); + m = 0; + z = 0; +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); #endif - in_checker = start_in_checker[3]; - do - { -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - HDfprintf(stdout, " %d", (int)(*val_ptr)); + in_checker = start_in_checker[3]; + do { +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, " %d", (int)(*val_ptr)); #endif - if ( z >= checker_edge_size ) { + if (z >= checker_edge_size) { - in_checker = ! in_checker; - z = 0; - } - - if ( in_checker ) { - - if ( *val_ptr != expected_value ) { + in_checker = !in_checker; + z = 0; + } - good_data = FALSE; - } - - /* zero out buffer for re-use */ - *val_ptr = 0; + if (in_checker) { - } else if ( *val_ptr != 0 ) { + if (*val_ptr != expected_value) { good_data = FALSE; - - /* zero out buffer for re-use */ - *val_ptr = 0; - } - val_ptr++; - expected_value++; - m++; - z++; - - } while ( ( rank >= (test_max_rank - 4) ) && - ( m < edge_size ) ); -#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG - HDfprintf(stdout, "\n"); + /* zero out buffer for re-use */ + *val_ptr = 0; + } + else if (*val_ptr != 0) { + + good_data = FALSE; + + /* zero out buffer for re-use */ + *val_ptr = 0; + } + + val_ptr++; + expected_value++; + m++; + z++; + + } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); +#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG + HDfprintf(stdout, "\n"); #endif - l++; - y++; - } while ( ( rank >= (test_max_rank - 3) ) && - ( l < edge_size ) ); - k++; - x++; - } while ( ( rank >= (test_max_rank - 2) ) && - ( k < edge_size ) ); - j++; - w++; - } while ( ( rank >= (test_max_rank - 1) ) && - ( j < edge_size ) ); - i++; - v++; - } while ( ( rank >= test_max_rank ) && - ( i < edge_size ) ); + l++; + y++; + } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); + k++; + x++; + } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); + j++; + w++; + } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); + i++; + v++; +} while ((rank >= test_max_rank) && (i < edge_size)); - return(good_data); +return (good_data); } /* ckrbrd_hs_dr_pio_test__verify_data() */ - /*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__d2m_l2s() - * - * Purpose: Part one of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. + * Function: ckrbrd_hs_dr_pio_test__d2m_l2s() * - * Verify that we can read from disk correctly using checker - * board selections of different rank that - * H5S_select_shape_same() views as being of the same shape. + * Purpose: Part one of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. * - * In this function, we test this by reading small_rank - 1 - * checker board slices from the on disk large cube, and - * verifying that the data read is correct. Verify that - * H5S_select_shape_same() returns true on the memory and - * file selections. + * Verify that we can read from disk correctly using checker + * board selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. * - * Return: void + * In this function, we test this by reading small_rank - 1 + * checker board slices from the on disk large cube, and + * verifying that the data read is correct. 
Verify that + * H5Sselect_shape_same() returns true on the memory and + * file selections. * - * Programmer: JRM -- 9/15/11 + * Return: void * - * Modifications: - * - * None. + * Programmer: JRM -- 9/15/11 * *------------------------------------------------------------------------- */ @@ -2982,31 +2531,30 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr, #define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) +ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()"; - uint32_t * ptr_0; + uint32_t *ptr_0; #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - hbool_t data_ok = FALSE; - int i, j, k, l; - uint32_t expected_value; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t data_ok = FALSE; + int i, j, k, l; + uint32_t expected_value; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - /* first, verify that we can read from disk correctly using selections - * of different rank that H5S_select_shape_same() views as being of the + * of different rank that H5Sselect_shape_same() views as being of the * same shape. * - * Start by reading a (small_rank - 1)-D checker board slice from this - * processes slice of the on disk large data set, and verifying that the - * data read is correct. Verify that H5S_select_shape_same() returns + * Start by reading a (small_rank - 1)-D checker board slice from this + * processes slice of the on disk large data set, and verifying that the + * data read is correct. Verify that H5Sselect_shape_same() returns * true on the memory and file selections. 
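The premise behind all of these mixed-rank tests is that H5Sselect_shape_same() compares the shapes of the selections, not the ranks of their dataspaces. A minimal standalone sketch (not from the patch; sizes and the function name are illustrative):

#include "hdf5.h"

/* Illustrative only: a full 1-D run of 10 elements has the same shape
 * as a 1 x 10 hyperslab taken from a 2-D space, so the comparison
 * returns TRUE even though the dataspace ranks differ. */
static htri_t
shape_same_across_ranks(void)
{
    hsize_t dims1[1] = {10};
    hsize_t dims2[2] = {4, 10};
    hsize_t start[2] = {2, 0};
    hsize_t count[2] = {1, 10};
    hid_t   sid1     = H5Screate_simple(1, dims1, NULL);
    hid_t   sid2     = H5Screate_simple(2, dims2, NULL);
    htri_t  same;

    H5Sselect_all(sid1);
    H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);

    same = H5Sselect_shape_same(sid1, sid2); /* TRUE */

    H5Sclose(sid1);
    H5Sclose(sid2);
    return same;
}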
* * The first step is to set up the needed checker board selection in the @@ -3014,96 +2562,90 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) */ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, - tv_ptr->small_ds_slice_sid, - tv_ptr->small_rank - 1, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, sel_start); /* zero out the buffer we will be reading into */ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size); -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", - fcnName, tv_ptr->mpi_rank); +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank); ptr_0 = tv_ptr->small_ds_slice_buf; - for ( i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++ ) { + for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) { HDfprintf(stdout, "%d ", (int)(*ptr_0)); ptr_0++; } HDfprintf(stdout, "\n"); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ /* set up start, stride, count, and block -- note that we will * change start[] so as to read slices of the large cube. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } } -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, - "%s:%d: reading slice from big ds on disk into small ds slice.\n", - fcnName, tv_ptr->mpi_rank); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName, + tv_ptr->mpi_rank); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ /* in serial versions of this test, we loop through all the dimensions - * of the large data set. However, in the parallel version, each + * of the large data set. However, in the parallel version, each * process only works with that slice of the large cube indicated - * by its rank -- hence we set the most slowly changing index to - * mpi_rank, and don't itterate over it. + * by its rank -- hence we set the most slowly changing index to + * mpi_rank, and don't iterate over it. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. 
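A note on the start/stride/count/block setup above: with count[i] == 1 there is only one block per dimension, so block[] alone determines the slice shape, and the slow dimensions collapse to the pinned start[i]. A rank-3 sketch, not part of the patch, with hypothetical sizes:

#include "hdf5.h"

/* Illustrative only: select a single 10-element row out of a
 * 10 x 10 x 10 space by giving the slow dimensions block 1 and the
 * fastest dimension a full edge.  The stride never matters here
 * because each dimension has exactly one block. */
static hid_t
select_pinned_row(void)
{
    hsize_t dims[3]   = {10, 10, 10};
    hsize_t start[3]  = {3, 7, 0};    /* pinned slow indices */
    hsize_t stride[3] = {10, 10, 10}; /* irrelevant: count[i] == 1 */
    hsize_t count[3]  = {1, 1, 1};
    hsize_t block[3]  = {1, 1, 10};
    hid_t   sid       = H5Screate_simple(3, dims, NULL);

    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    /* H5Sget_select_npoints(sid) == 10 */
    return sid;
}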
*/ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -3115,16 +2657,16 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ - /* we know that small_rank - 1 >= 1 and that - * large_rank > small_rank by the assertions at the head + /* we know that small_rank - 1 >= 1 and that + * large_rank > small_rank by the assertions at the head * of this function. Thus no need for another inner loop. */ tv_ptr->start[0] = (hsize_t)i; @@ -3133,76 +2675,54 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd - ( - tv_ptr->mpi_rank, - tv_ptr->file_large_ds_sid_0, - tv_ptr->large_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, - tv_ptr->start - ); - - /* verify that H5S_select_shape_same() reports the two + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. 
*/ - check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); - + check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); /* Read selection from disk */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, - tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1], - tv_ptr->start[2], tv_ptr->start[3], tv_ptr->start[4]); - HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", - fcnName, +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName, H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid), H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0)); -#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ - - ret = H5Dread(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->small_ds_slice_sid, - tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_slice_buf); +#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ + + ret = + H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf); VRFY((ret >= 0), "H5Dread() slice from large ds succeeded."); -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG - HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", - fcnName, tv_ptr->mpi_rank); +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG + HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */ /* verify that expected data is retrieved */ - expected_value = (uint32_t) - ((i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); - - data_ok = ckrbrd_hs_dr_pio_test__verify_data - ( - tv_ptr->small_ds_slice_buf, - tv_ptr->small_rank - 1, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - expected_value, - (hbool_t)TRUE - ); - - VRFY((data_ok == TRUE), - "small slice read from large ds data good."); + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + + data_ok = ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); + + VRFY((data_ok == TRUE), "small slice read from large ds data good."); (tv_ptr->tests_run)++; } @@ -3211,45 +2731,34 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < 
tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* ckrbrd_hs_dr_pio_test__d2m_l2s() */ - /*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__d2m_s2l() - * - * Purpose: Part two of a series of tests of I/O to/from hyperslab - * selections of different rank in the parallel. - * - * Verify that we can read from disk correctly using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Function: ckrbrd_hs_dr_pio_test__d2m_s2l() * - * In this function, we test this by reading checker board - * slices of the on disk small data set into slices through - * the in memory large data set, and verify that the correct - * data (and only the correct data) is read. + * Purpose: Part two of a series of tests of I/O to/from hyperslab + * selections of different rank in the parallel. * - * Return: void + * Verify that we can read from disk correctly using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * Programmer: JRM -- 8/15/11 + * In this function, we test this by reading checker board + * slices of the on disk small data set into slices through + * the in memory large data set, and verify that the correct + * data (and only the correct data) is read. * - * Modifications: + * Return: void * - * None. + * Programmer: JRM -- 8/15/11 * *------------------------------------------------------------------------- */ @@ -3257,47 +2766,40 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) #define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) +ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()"; #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - hbool_t data_ok = FALSE; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - - /* similarly, read slices of the on disk small data set into slices - * through the in memory large data set, and verify that the correct + /* similarly, read slices of the on disk small data set into slices + * through the in memory large data set, and verify that the correct * data (and only the correct data) is read. 
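The "(and only the correct data)" verification used throughout relies on zeroing the read buffer first, then requiring zeros everywhere outside the expected index range. A condensed sketch of that check; the helper name is hypothetical and not from the patch:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: the buffer is zeroed before every read, so any
 * non-zero value outside [start_index, stop_index] means the read
 * touched memory it should not have. */
static int
outside_region_untouched(const uint32_t *buf, size_t len,
                         size_t start_index, size_t stop_index)
{
    size_t u;

    for (u = 0; u < len; u++)
        if ((u < start_index || u > stop_index) && buf[u] != 0)
            return 0; /* stray data */

    return 1;
}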
*/ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, - tv_ptr->file_small_ds_sid_0, - tv_ptr->small_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, sel_start); -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - HDfprintf(stdout, - "%s reading slices of on disk small data set into slices of big data set.\n", - fcnName); +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ /* zero out the buffer we will be reading into */ @@ -3305,70 +2807,69 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) /* set up start, stride, count, and block -- note that we will * change start[] so as to read the slice of the small data set - * into different slices of the process slice of the large data + * into different slices of the process slice of the large data * set. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } } /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. + * of the large data set that don't appear in the small data set. * - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't itterate + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate * over it. */ - - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. 
Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -3380,11 +2881,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -3398,80 +2899,62 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd - ( - tv_ptr->mpi_rank, - tv_ptr->mem_large_ds_sid, - tv_ptr->large_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, - tv_ptr->start - ); - - - /* verify that H5S_select_shape_same() reports the two + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. */ - check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0, - tv_ptr->mem_large_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); - + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); /* Read selection from disk */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], - tv_ptr->start[3], tv_ptr->start[4]); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid), H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0)); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - ret = H5Dread(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_1); + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); /* verify that the expected data and only the * expected data was read. 
*/ - data_ok = TRUE; - ptr_1 = tv_ptr->large_ds_buf_1; - expected_value = - (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - start_index = (size_t)( - (i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); + data_ok = TRUE; + ptr_1 = tv_ptr->large_ds_buf_1; + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); stop_index = start_index + tv_ptr->small_ds_slice_size - 1; -#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG +#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG { int m, n; - HDfprintf(stdout, "%s:%d: expected_value = %d.\n", - fcnName, tv_ptr->mpi_rank, expected_value); - HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, start_index, stop_index); + HDfprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank, + expected_value); + HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank, + start_index, stop_index); n = 0; - for ( m = 0; (unsigned)m < tv_ptr->large_ds_size; m ++ ) { + for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) { HDfprintf(stdout, "%d ", (int)(*ptr_1)); ptr_1++; n++; - if ( n >= tv_ptr->edge_size ) { + if (n >= tv_ptr->edge_size) { HDfprintf(stdout, "\n"); n = 0; } @@ -3481,12 +2964,12 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) } #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */ - HDassert( start_index < stop_index ); - HDassert( stop_index <= tv_ptr->large_ds_size ); + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->large_ds_size); - for ( u = 0; u < start_index; u++ ) { + for (u = 0; u < start_index; u++) { - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { data_ok = FALSE; } @@ -3497,28 +2980,19 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) ptr_1++; } - VRFY((data_ok == TRUE), - "slice read from small to large ds data good(1)."); - - data_ok = ckrbrd_hs_dr_pio_test__verify_data - ( - ptr_1, - tv_ptr->small_rank - 1, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - expected_value, - (hbool_t)TRUE - ); + VRFY((data_ok == TRUE), "slice read from small to large ds data good(1)."); - VRFY((data_ok == TRUE), - "slice read from small to large ds data good(2)."); + data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1, + tv_ptr->edge_size, tv_ptr->checker_edge_size, + expected_value, (hbool_t)TRUE); + VRFY((data_ok == TRUE), "slice read from small to large ds data good(2)."); ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1; - for ( u = stop_index + 1; u < tv_ptr->large_ds_size; u++ ) { + for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) { - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { data_ok = FALSE; } @@ -3529,8 +3003,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) ptr_1++; } - VRFY((data_ok == TRUE), - "slice read from small to large ds data good(3)."); + VRFY((data_ok == TRUE), "slice read from small to large ds data good(3)."); (tv_ptr->tests_run)++; } @@ -3539,49 +3012,38 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - 
} while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* ckrbrd_hs_dr_pio_test__d2m_s2l() */ - /*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__m2d_l2s() + * Function: ckrbrd_hs_dr_pio_test__m2d_l2s() * - * Purpose: Part three of a series of tests of I/O to/from checker - * board hyperslab selections of different rank in the - * parallel. + * Purpose: Part three of a series of tests of I/O to/from checker + * board hyperslab selections of different rank in the + * parallel. * - * Verify that we can write from memory to file using checker - * board selections of different rank that - * H5S_select_shape_same() views as being of the same shape. + * Verify that we can write from memory to file using checker + * board selections of different rank that + * H5Sselect_shape_same() views as being of the same shape. * - * Do this by writing small_rank - 1 dimensional checker - * board slices from the in memory large data set to the on - * disk small cube dataset. After each write, read the - * slice of the small dataset back from disk, and verify - * that it contains the expected data. Verify that - * H5S_select_shape_same() returns true on the memory and - * file selections. + * Do this by writing small_rank - 1 dimensional checker + * board slices from the in memory large data set to the on + * disk small cube dataset. After each write, read the + * slice of the small dataset back from disk, and verify + * that it contains the expected data. Verify that + * H5Sselect_shape_same() returns true on the memory and + * file selections. * - * Return: void + * Return: void * - * Programmer: JRM -- 8/15/11 - * - * Modifications: - * - * None. 
+ * Programmer: JRM -- 8/15/11 * *------------------------------------------------------------------------- */ @@ -3589,94 +3051,78 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) #define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) +ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()"; #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - hbool_t data_ok = FALSE; - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - /* now we go in the opposite direction, verifying that we can write * from memory to file using selections of different rank that - * H5S_select_shape_same() views as being of the same shape. + * H5Sselect_shape_same() views as being of the same shape. * * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small dataset. After each write, read the slice of - * the small dataset back from disk, and verify that it contains the - * expected data. Verify that H5S_select_shape_same() returns true on + * set to the on disk small dataset. After each write, read the slice of + * the small dataset back from disk, and verify that it contains the + * expected data. Verify that H5Sselect_shape_same() returns true on * the memory and file selections. 
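Each iteration of this write test is a three-step round trip. The sketch below restates it with stand-in handles and buffers for the corresponding tv_ptr fields; it is an outline under those assumptions, not code from the patch:

#include "hdf5.h"

/* Illustrative only: zero the target region, write the checkerboard
 * through mismatched-rank (but shape-same) selections, then read the
 * region back for verification. */
static herr_t
write_slice_round_trip(hid_t dset, hid_t mem_sid, hid_t file_sid,
                       hid_t ckbd_mem_sid, hid_t ckbd_file_sid, hid_t dxpl,
                       const uint32_t *zero_buf, const uint32_t *src_buf,
                       uint32_t *check_buf)
{
    /* 1: zero this rank's slice of the on-disk dataset */
    if (H5Dwrite(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, zero_buf) < 0)
        return -1;

    /* 2: write the checkerboard through shape-same selections */
    if (H5Dwrite(dset, H5T_NATIVE_UINT32, ckbd_mem_sid, ckbd_file_sid, dxpl, src_buf) < 0)
        return -1;

    /* 3: read the slice back for verification */
    return H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, check_buf);
}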
*/ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } - ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded"); + ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded"); + ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, - tv_ptr->file_small_ds_sid_1, - tv_ptr->small_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, - sel_start); + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, + sel_start); /* set up start, stride, count, and block -- note that we will * change start[] so as to read slices of the large cube. */ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } @@ -3685,60 +3131,58 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) /* zero out the in memory small ds */ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size); - -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG - HDfprintf(stdout, - "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n", - fcnName); +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, + "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n", + fcnName); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ /* in serial versions of this test, we loop through all the dimensions - * of the large data set that don't appear in the small data set. + * of the large data set that don't appear in the small data set. 
* - * However, in the parallel version, each process only works with that - * slice of the large (and small) data set indicated by its rank -- hence - * we set the most slowly changing index to mpi_rank, and don't itterate + * However, in the parallel version, each process only works with that + * slice of the large (and small) data set indicated by its rank -- hence + * we set the most slowly changing index to mpi_rank, and don't iterate * over it. */ - - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } j = 0; do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (baring major re-orgaization), this gives us: * @@ -3750,11 +3194,11 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -3762,16 +3206,12 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) * by the assertions at the head of this function. Thus no * need for another inner loop. */ - + /* zero out this rank's slice of the on disk small data set */ - ret = H5Dwrite(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_2); + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2); VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded."); - + /* select the portion of the in memory large cube from which we * are going to write data. 
*/ @@ -3780,120 +3220,87 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[2] = (hsize_t)k; tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - - HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd - ( - tv_ptr->mpi_rank, - tv_ptr->mem_large_ds_sid, - tv_ptr->large_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, - tv_ptr->start - ); - - - /* verify that H5S_select_shape_same() reports the in - * memory checkerboard selection of the slice through the + + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the in + * memory checkerboard selection of the slice through the * large dataset and the checkerboard selection of the process * slice of the small data set as having the same shape. */ - check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_1, - tv_ptr->mem_large_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed."); - - - /* write the checker board selection of the slice from the in - * memory large data set to the slice of the on disk small - * dataset. + check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed."); + + /* write the checker board selection of the slice from the in + * memory large data set to the slice of the on disk small + * dataset. 
*/ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], - tv_ptr->start[3], tv_ptr->start[4]); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1)); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */ - ret = H5Dwrite(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_small_ds_sid_1, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_0); + ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0); VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded."); - - + /* read the on disk process slice of the small dataset into memory */ - ret = H5Dread(tv_ptr->small_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_small_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_1); + ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1); VRFY((ret >= 0), "H5Dread() slice from small ds succeeded."); - - + /* verify that expected data is retrieved */ - - mis_match = FALSE; - - expected_value = (uint32_t)( - (i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); - + + expected_value = + (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size; - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - HDassert( start_index < stop_index ); - HDassert( stop_index <= tv_ptr->small_ds_size ); - + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; + + HDassert(start_index < stop_index); + HDassert(stop_index <= tv_ptr->small_ds_size); + data_ok = TRUE; - + ptr_1 = tv_ptr->small_ds_buf_1; - for ( u = 0; u < start_index; u++, ptr_1++ ) { - - if ( *ptr_1 != 0 ) { + for (u = 0; u < start_index; u++, ptr_1++) { + + if (*ptr_1 != 0) { data_ok = FALSE; - *ptr_1 = 0; + *ptr_1 = 0; } } - data_ok &= ckrbrd_hs_dr_pio_test__verify_data - ( - tv_ptr->small_ds_buf_1 + start_index, - tv_ptr->small_rank - 1, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - expected_value, - (hbool_t)TRUE - ); - + data_ok &= ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); ptr_1 = tv_ptr->small_ds_buf_1; - for ( u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++ ) { + for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { data_ok = FALSE; - *ptr_1 = 0; + 
*ptr_1 = 0; } } - VRFY((data_ok == TRUE), - "large slice write slice to small slice data good."); + VRFY((data_ok == TRUE), "large slice write slice to small slice data good."); (tv_ptr->tests_run)++; } @@ -3902,49 +3309,38 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* ckrbrd_hs_dr_pio_test__m2d_l2s() */ - /*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__m2d_s2l() - * - * Purpose: Part four of a series of tests of I/O to/from checker - * board hyperslab selections of different rank in the parallel. + * Function: ckrbrd_hs_dr_pio_test__m2d_s2l() * - * Verify that we can write from memory to file using - * selections of different rank that H5S_select_shape_same() - * views as being of the same shape. + * Purpose: Part four of a series of tests of I/O to/from checker + * board hyperslab selections of different rank in the parallel. * - * Do this by writing checker board selections of the contents - * of the process's slice of the in memory small data set to - * slices of the on disk large data set. After each write, - * read the process's slice of the large data set back into - * memory, and verify that it contains the expected data. + * Verify that we can write from memory to file using + * selections of different rank that H5Sselect_shape_same() + * views as being of the same shape. * - * Verify that H5S_select_shape_same() returns true on the - * memory and file selections. + * Do this by writing checker board selections of the contents + * of the process's slice of the in memory small data set to + * slices of the on disk large data set. After each write, + * read the process's slice of the large data set back into + * memory, and verify that it contains the expected data. * - * Return: void + * Verify that H5Sselect_shape_same() returns true on the + * memory and file selections. 
* - * Programmer: JRM -- 8/15/11 + * Return: void * - * Modifications: - * - * None + * Programmer: JRM -- 8/15/11 * *------------------------------------------------------------------------- */ @@ -3952,94 +3348,80 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr) #define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) +ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr) { -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()"; #endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - hbool_t data_ok = FALSE; - hbool_t mis_match = FALSE; - int i, j, k, l; - size_t u; - size_t start_index; - size_t stop_index; - uint32_t expected_value; - uint32_t * ptr_1; - int mpi_rank; /* needed by VRFY */ - hsize_t sel_start[PAR_SS_DR_MAX_RANK]; - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + hbool_t data_ok = FALSE; + int i, j, k, l; + size_t u; + size_t start_index; + size_t stop_index; + uint32_t expected_value; + uint32_t *ptr_1; + int mpi_rank; /* needed by VRFY */ + hsize_t sel_start[PAR_SS_DR_MAX_RANK]; + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ /* initialize the local copy of mpi_rank */ mpi_rank = tv_ptr->mpi_rank; - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5S_select_shape_same() returns true on the memory + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory * and file selections. 
*/ - tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); + tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank); tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1)); - tv_ptr->count[0] = 1; - tv_ptr->block[0] = 1; + tv_ptr->count[0] = 1; + tv_ptr->block[0] = 1; - for ( i = 1; i < tv_ptr->large_rank; i++ ) { + for (i = 1; i < tv_ptr->large_rank; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); + tv_ptr->count[i] = 1; + tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } - ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded"); - - ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, - H5S_SELECT_SET, - tv_ptr->start, - tv_ptr->stride, - tv_ptr->count, - tv_ptr->block); - VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) suceeded"); - - /* setup a checkerboard selection of the slice of the in memory small + ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded"); + + ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride, + tv_ptr->count, tv_ptr->block); + VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded"); + + /* setup a checkerboard selection of the slice of the in memory small * data set associated with the process's mpi rank. */ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0; - sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, - tv_ptr->mem_small_ds_sid, - tv_ptr->small_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, + sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank, + tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, sel_start); /* set up start, stride, count, and block -- note that we will - * change start[] so as to write checkerboard selections of slices + * change start[] so as to write checkerboard selections of slices * of the small data set to slices of the large data set. 
*/ - for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) { + for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) { - tv_ptr->start[i] = 0; + tv_ptr->start[i] = 0; tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size); - tv_ptr->count[i] = 1; - if ( (PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1) ) { + tv_ptr->count[i] = 1; + if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) { tv_ptr->block[i] = 1; - - } else { + } + else { tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size); } @@ -4049,47 +3431,48 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size); #if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG - HDfprintf(stdout, - "%s writing process checkerboard selections of slices of small ds to process slices of large ds on disk.\n", - fcnName); + HDfprintf(stdout, + "%s writing process checkerboard selections of slices of small ds to process slices of large " + "ds on disk.\n", + fcnName); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) { i = tv_ptr->mpi_rank; - - } else { + } + else { i = 0; } - /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to + /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to * loop over it -- either we are setting i to mpi_rank, or - * we are setting it to zero. It will not change during the + * we are setting it to zero. It will not change during the * test. */ - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) { j = tv_ptr->mpi_rank; - - } else { + } + else { j = 0; } do { - if ( PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2 ) { + if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) { k = tv_ptr->mpi_rank; - - } else { + } + else { k = 0; } do { - /* since small rank >= 2 and large_rank > small_rank, we + /* since small_rank >= 2 and large_rank > small_rank, we * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5 * (barring major re-organization), this gives us: * @@ -4101,11 +3484,11 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) l = 0; do { - if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */ + if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */ (tv_ptr->tests_skipped)++; - - } else { /* run the test */ + } + else { /* run the test */ tv_ptr->skips = 0; /* reset the skips counter */ @@ -4118,14 +3501,9 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) * Note that this will leave one slice with its original data * as there is one more slice than processes. */ - ret = H5Dwrite(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_2); - VRFY((ret != FAIL), "H5Dwrite() to zero large ds suceeded"); - + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2); + VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded"); /* select the portion of the in memory large cube to which we * are going to write data.
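The ckrbrd_hs_dr_pio_test__slct_ckrbrd() calls in the surrounding hunks reduce to start/stride/count/block hyperslab arithmetic on a dataspace. As a rough, hypothetical sketch of the idea (not the test's own helper, which also copes with partial blocks at the edges), a 2-D checkerboard can be composed from two interleaved hyperslabs using only the public dataspace API, assuming the extent divides evenly by twice the checker edge:

#include "hdf5.h"

/* Select a checkerboard pattern on a freshly created edge x edge dataspace.
 * Squares of size ce x ce are selected wherever the block-row and
 * block-column parities match; the H5S_SELECT_OR pass adds the odd/odd
 * squares to the even/even ones chosen by H5S_SELECT_SET. */
static hid_t
make_checkerboard_sketch(hsize_t edge, hsize_t ce)
{
    hsize_t dims[2]   = {edge, edge};
    hsize_t stride[2] = {2 * ce, 2 * ce};           /* skip the unselected squares */
    hsize_t count[2]  = {edge / (2 * ce), edge / (2 * ce)};
    hsize_t block[2]  = {ce, ce};
    hsize_t start0[2] = {0, 0};                     /* even/even squares */
    hsize_t start1[2] = {ce, ce};                   /* odd/odd squares   */
    hid_t   sid       = H5Screate_simple(2, dims, NULL);

    if (sid < 0)
        return H5I_INVALID_HID;
    if (H5Sselect_hyperslab(sid, H5S_SELECT_SET, start0, stride, count, block) < 0 ||
        H5Sselect_hyperslab(sid, H5S_SELECT_OR, start1, stride, count, block) < 0) {
        H5Sclose(sid);
        return H5I_INVALID_HID;
    }
    return sid; /* caller is responsible for H5Sclose() */
}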
@@ -4136,126 +3514,89 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) tv_ptr->start[3] = (hsize_t)l; tv_ptr->start[4] = 0; - HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1)); - HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1)); - - ckrbrd_hs_dr_pio_test__slct_ckrbrd - ( - tv_ptr->mpi_rank, - tv_ptr->file_large_ds_sid_1, - tv_ptr->large_rank, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - tv_ptr->small_rank - 1, - tv_ptr->start - ); - - - /* verify that H5S_select_shape_same() reports the in + HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1)); + HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1)); + + ckrbrd_hs_dr_pio_test__slct_ckrbrd( + tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size, + tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start); + + /* verify that H5Sselect_shape_same() reports the in * memory small data set slice selection and the * on disk slice through the large data set selection * as having the same shape. */ - check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_1); - VRFY((check == TRUE), "H5S_select_shape_same_test passed"); + check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1); + VRFY((check == TRUE), "H5Sselect_shape_same passed"); - - /* write the small data set slice from memory to the - * target slice of the disk data set + /* write the small data set slice from memory to the + * target slice of the disk data set */ -#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG - HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", - fcnName, tv_ptr->mpi_rank, - tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], - tv_ptr->start[3], tv_ptr->start[4]); - HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", - fcnName, tv_ptr->mpi_rank, +#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank, + tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3], + tv_ptr->start[4]); + HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank, H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid), H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1)); #endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */ - ret = H5Dwrite(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_small_ds_sid, - tv_ptr->file_large_ds_sid_1, - tv_ptr->xfer_plist, - tv_ptr->small_ds_buf_0); - VRFY((ret != FAIL), - "H5Dwrite of small ds slice to large ds succeeded"); + ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid, + tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0); + VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded"); - - /* read this processes slice on the on disk large + /* read this process's slice of the on disk large * data set into memory.
*/ - ret = H5Dread(tv_ptr->large_dataset, - H5T_NATIVE_UINT32, - tv_ptr->mem_large_ds_sid, - tv_ptr->file_large_ds_sid_0, - tv_ptr->xfer_plist, - tv_ptr->large_ds_buf_1); - VRFY((ret != FAIL), - "H5Dread() of process slice of large ds succeeded"); - + ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid, + tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1); + VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded"); /* verify that the expected data and only the * expected data was read. */ - expected_value = - (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); - - start_index = (size_t) - ((i * tv_ptr->edge_size * tv_ptr->edge_size * - tv_ptr->edge_size * tv_ptr->edge_size) + - (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + - (k * tv_ptr->edge_size * tv_ptr->edge_size) + - (l * tv_ptr->edge_size)); - stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - - HDassert( start_index < stop_index ); - HDassert( stop_index < tv_ptr->large_ds_size ); + expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size); + start_index = + (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size * + tv_ptr->edge_size) + + (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) + + (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size)); + stop_index = start_index + tv_ptr->small_ds_slice_size - 1; - mis_match = FALSE; + HDassert(start_index < stop_index); + HDassert(stop_index < tv_ptr->large_ds_size); data_ok = TRUE; ptr_1 = tv_ptr->large_ds_buf_1; - for ( u = 0; u < start_index; u++, ptr_1++ ) { + for (u = 0; u < start_index; u++, ptr_1++) { - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { data_ok = FALSE; - *ptr_1 = 0; + *ptr_1 = 0; } } - data_ok &= ckrbrd_hs_dr_pio_test__verify_data - ( - tv_ptr->large_ds_buf_1 + start_index, - tv_ptr->small_rank - 1, - tv_ptr->edge_size, - tv_ptr->checker_edge_size, - expected_value, - (hbool_t)TRUE - ); - + data_ok &= ckrbrd_hs_dr_pio_test__verify_data( + tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size, + tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE); ptr_1 = tv_ptr->large_ds_buf_1; - for ( u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++ ) { + for (u = stop_index; u < tv_ptr->small_ds_size; u++, ptr_1++) { - if ( *ptr_1 != 0 ) { + if (*ptr_1 != 0) { data_ok = FALSE; - *ptr_1 = 0; + *ptr_1 = 0; } } - VRFY((data_ok == TRUE), - "small ds cb slice write to large ds slice data good."); + VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good."); (tv_ptr->tests_run)++; } @@ -4264,40 +3605,25 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) (tv_ptr->total_tests)++; - } while ( ( tv_ptr->large_rank > 2 ) && - ( (tv_ptr->small_rank - 1) <= 1 ) && - ( l < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size)); k++; - } while ( ( tv_ptr->large_rank > 3 ) && - ( (tv_ptr->small_rank - 1) <= 2 ) && - ( k < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size)); j++; - } while ( ( tv_ptr->large_rank > 4 ) && - ( (tv_ptr->small_rank - 1) <= 3 ) && - ( j < tv_ptr->edge_size ) ); + } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size)); return; } /* ckrbrd_hs_dr_pio_test__m2d_s2l() */ - 
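Throughout these hunks the old internal H5S_select_shape_same_test() helper gives way to the public H5Sselect_shape_same() API, which returns an htri_t (positive for TRUE, zero for FALSE, negative on failure). A minimal, self-contained sketch of the call (demo code, not part of the test suite), illustrating the cross-rank comparison these tests rely on: a fully selected N-element 1-D dataspace and a fully selected 1 x N 2-D dataspace differ in rank but should compare as having the same shape.

#include "hdf5.h"

static int
shape_same_demo(void)
{
    hsize_t dims1[1] = {10};
    hsize_t dims2[2] = {1, 10};
    hid_t   sid1     = H5Screate_simple(1, dims1, NULL); /* everything selected by default */
    hid_t   sid2     = H5Screate_simple(2, dims2, NULL);
    htri_t  same     = H5Sselect_shape_same(sid1, sid2);

    H5Sclose(sid1);
    H5Sclose(sid2);

    return (same > 0) ? 0 : -1; /* 0 if the selections compare as the same shape */
}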
/*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test__run_test() - * - * Purpose: Test I/O to/from checkerboard selections of hyperslabs of - * different rank in the parallel. + * Function: ckrbrd_hs_dr_pio_test__run_test() * - * Return: void + * Purpose: Test I/O to/from checkerboard selections of hyperslabs of + * different rank in the parallel. * - * Programmer: JRM -- 10/10/09 + * Return: void * - * Modifications: - * - * JRM -- 9/16/10 - * Added the express_test parameter. Use it to control - * whether we set an alignment, and whether we allocate - * chunks such that no two processes will normally touch - * the same chunk. + * Programmer: JRM -- 10/10/09 * *------------------------------------------------------------------------- */ @@ -4305,29 +3631,18 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr) #define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0 static void -ckrbrd_hs_dr_pio_test__run_test(const int test_num, - const int edge_size, - const int checker_edge_size, - const int chunk_edge_size, - const int small_rank, - const int large_rank, - const hbool_t use_collective_io, - const hid_t dset_type, - const int express_test, - int * skips_ptr, - int max_skips, - int64_t * total_tests_ptr, - int64_t * tests_run_ptr, - int64_t * tests_skipped_ptr) +ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size, + const int chunk_edge_size, const int small_rank, const int large_rank, + const hbool_t use_collective_io, const hid_t dset_type, + const int express_test, int *skips_ptr, int max_skips, + int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr) { #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()"; #endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - int mpi_rank; /* needed by VRFY */ - struct hs_dr_pio_test_vars_t test_vars = - { - /* int mpi_size = */ -1, + struct hs_dr_pio_test_vars_t test_vars = { + /* int mpi_size = */ -1, /* int mpi_rank = */ -1, /* MPI_Comm mpi_comm = */ MPI_COMM_NULL, /* MPI_Inf mpi_info = */ MPI_INFO_NULL, @@ -4343,12 +3658,12 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, /* uint32_t * small_ds_buf_2 = */ NULL, /* uint32_t * small_ds_slice_buf = */ NULL, /* uint32_t * large_ds_buf_0 = */ NULL, - /* uint32_t * large_ds_buf_1 = */ NULL, + /* uint32_t * large_ds_buf_1 = */ NULL, /* uint32_t * large_ds_buf_2 = */ NULL, /* uint32_t * large_ds_slice_buf = */ NULL, /* int small_ds_offset = */ -1, /* int large_ds_offset = */ -1, - /* hid_t fid = */ -1, /* HDF5 file ID */ + /* hid_t fid = */ -1, /* HDF5 file ID */ /* hid_t xfer_plist = */ H5P_DEFAULT, /* hid_t full_mem_small_ds_sid = */ -1, /* hid_t full_file_small_ds_sid = */ -1, @@ -4364,61 +3679,50 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, /* hid_t file_large_ds_process_slice_sid = */ -1, /* hid_t mem_large_ds_process_slice_sid = */ -1, /* hid_t large_ds_slice_sid = */ -1, - /* hid_t small_dataset = */ -1, /* Dataset ID */ - /* hid_t large_dataset = */ -1, /* Dataset ID */ + /* hid_t small_dataset = */ -1, /* Dataset ID */ + /* hid_t large_dataset = */ -1, /* Dataset ID */ /* size_t small_ds_size = */ 1, /* size_t small_ds_slice_size = */ 1, /* size_t large_ds_size = */ 1, /* size_t large_ds_slice_size = */ 1, - /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t 
stride[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, - /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0,0,0,0,0}, + /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, + /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0}, /* hsize_t * start_ptr = */ NULL, /* hsize_t * stride_ptr = */ NULL, /* hsize_t * count_ptr = */ NULL, /* hsize_t * block_ptr = */ NULL, - /* int skips = */ 0, - /* int max_skips = */ 0, + /* int skips = */ 0, + /* int max_skips = */ 0, /* int64_t total_tests = */ 0, /* int64_t tests_run = */ 0, - /* int64_t tests_skipped = */ 0 - }; - struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars; - - hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, - chunk_edge_size, small_rank, large_rank, - use_collective_io, dset_type, express_test, - tv_ptr); - - - /* initialize the local copy of mpi_rank */ - mpi_rank = tv_ptr->mpi_rank; + /* int64_t tests_skipped = */ 0}; + struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars; + hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, + use_collective_io, dset_type, express_test, tv_ptr); /* initialize skips & max_skips */ - tv_ptr->skips = *skips_ptr; + tv_ptr->skips = *skips_ptr; tv_ptr->max_skips = max_skips; - #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { - HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", - test_num, small_rank, large_rank); + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank); HDfprintf(stdout, "test %d: Initialization complete.\n", test_num); } #endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ - /* first, verify that we can read from disk correctly using selections - * of different rank that H5S_select_shape_same() views as being of the + * of different rank that H5Sselect_shape_same() views as being of the * same shape. * - * Start by reading a (small_rank - 1)-D slice from this processes slice - * of the on disk large data set, and verifying that the data read is - * correct. Verify that H5S_select_shape_same() returns true on the + * Start by reading a (small_rank - 1)-D slice from this process's slice + * of the on disk large data set, and verifying that the data read is + * correct. Verify that H5Sselect_shape_same() returns true on the * memory and file selections. * * The first step is to set up the needed checker board selection in the
* * Start by writing small_rank - 1 D slices from the in memory large data - * set to the on disk small dataset. After each write, read the slice of - * the small dataset back from disk, and verify that it contains the - * expected data. Verify that H5S_select_shape_same() returns true on + * set to the on disk small dataset. After each write, read the slice of + * the small dataset back from disk, and verify that it contains the + * expected data. Verify that H5Sselect_shape_same() returns true on * the memory and file selections. */ ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr); - - /* Now write the contents of the process's slice of the in memory - * small data set to slices of the on disk large data set. After + /* Now write the contents of the process's slice of the in memory + * small data set to slices of the on disk large data set. After * each write, read the process's slice of the large data set back - * into memory, and verify that it contains the expected data. - * Verify that H5S_select_shape_same() returns true on the memory + * into memory, and verify that it contains the expected data. + * Verify that H5Sselect_shape_same() returns true on the memory * and file selections. */ ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr); - #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { - HDfprintf(stdout, - "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", - test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), - (long long)(tv_ptr->total_tests)); + if (MAINPROCESS) { + HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n", + test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped), + (long long)(tv_ptr->total_tests)); } #endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ hs_dr_pio_test__takedown(tv_ptr); #if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG - if ( MAINPROCESS ) { + if (MAINPROCESS) { HDfprintf(stdout, "test %d: Takedown complete.\n", test_num); } #endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */ @@ -4487,30 +3786,15 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, } /* ckrbrd_hs_dr_pio_test__run_test() */ - /*------------------------------------------------------------------------- - * Function: ckrbrd_hs_dr_pio_test() - * - * Purpose: Test I/O to/from hyperslab selections of different rank in - * the parallel case. - * - * Return: void + * Function: ckrbrd_hs_dr_pio_test() * - * Programmer: JRM -- 9/18/09 + * Purpose: Test I/O to/from hyperslab selections of different rank in + * the parallel case. * - * Modifications: + * Return: void * - * Modified function to take a sample of the run times - * of the different tests, and skip some of them if - * run times are too long. - * - * We need to do this because Lustre runns very slowly - * if two or more processes are banging on the same - * block of memory. - * JRM -- 9/10/10 - * Break this one big test into 4 smaller tests according - * to {independent,collective}x{contigous,chunked} datasets. 
- * AKC -- 2010/01/17 + * Programmer: JRM -- 9/18/09 * *------------------------------------------------------------------------- */ @@ -4518,29 +3802,29 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num, static void ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) { - int express_test; - int local_express_test; - int mpi_size = -1; - int mpi_rank = -1; - int test_num = 0; - int edge_size; - int checker_edge_size = 3; - int chunk_edge_size = 0; - int small_rank = 3; - int large_rank = 4; - int mpi_result; - hid_t dset_type = H5T_NATIVE_UINT; - int skips = 0; - int max_skips = 0; + int express_test; + int local_express_test; + int mpi_size = -1; + int mpi_rank = -1; + int test_num = 0; + int edge_size; + int checker_edge_size = 3; + int chunk_edge_size = 0; + int small_rank = 3; + int large_rank = 4; + int mpi_result; + hid_t dset_type = H5T_NATIVE_UINT; + int skips = 0; + int max_skips = 0; /* The following table lists the number of sub-tests skipped between * each test that is actually executed as a function of the express * test level. Note that any value in excess of 4880 will cause all * sub-tests to be skipped. */ - int max_skips_tbl[4] = {0, 4, 64, 1024}; - int64_t total_tests = 0; - int64_t tests_run = 0; - int64_t tests_skipped = 0; + int max_skips_tbl[4] = {0, 4, 64, 1024}; + int64_t total_tests = 0; + int64_t tests_run = 0; + int64_t tests_skipped = 0; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -4551,73 +3835,50 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); - mpi_result = MPI_Allreduce((void *)&local_express_test, - (void *)&express_test, - 1, - MPI_INT, - MPI_MAX, + mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - VRFY((mpi_result == MPI_SUCCESS ), "MPI_Allreduce(0) succeeded"); + VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded"); - if ( local_express_test < 0 ) { + if (local_express_test < 0) { max_skips = max_skips_tbl[0]; - } else if ( local_express_test > 3 ) { + } + else if (local_express_test > 3) { max_skips = max_skips_tbl[3]; - } else { + } + else { max_skips = max_skips_tbl[local_express_test]; } -#if 0 +#if 0 { int DebugWait = 1; - + while (DebugWait) ; } -#endif +#endif - for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) { + for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) { - for ( small_rank = 2; small_rank < large_rank; small_rank++ ) { - switch(sstest_type){ + for (small_rank = 2; small_rank < large_rank; small_rank++) { + switch (sstest_type) { case IND_CONTIG: /* contiguous data set, independent I/O */ chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, - edge_size, - checker_edge_size, - chunk_edge_size, - small_rank, - large_rank, - FALSE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, FALSE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case IND_CONTIG */ - case COL_CONTIG: + case COL_CONTIG: /* contiguous data set, collective I/O */ chunk_edge_size = 0; - ckrbrd_hs_dr_pio_test__run_test(test_num, - edge_size, - checker_edge_size, - chunk_edge_size, - small_rank, - large_rank, - TRUE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, -
&tests_skipped); + ckrbrd_hs_dr_pio_test__run_test( + test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE, + dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case COL_CONTIG */ @@ -4625,19 +3886,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case IND_CHUNKED: /* chunked data set, independent I/O */ chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, - edge_size, - checker_edge_size, - chunk_edge_size, - small_rank, - large_rank, - FALSE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, + ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size, + small_rank, large_rank, FALSE, dset_type, express_test, + &skips, max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; @@ -4646,20 +3897,9 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) case COL_CHUNKED: /* chunked data set, collective I/O */ chunk_edge_size = 5; - ckrbrd_hs_dr_pio_test__run_test(test_num, - edge_size, - checker_edge_size, - chunk_edge_size, - small_rank, - large_rank, - TRUE, - dset_type, - express_test, - &skips, - max_skips, - &total_tests, - &tests_run, - &tests_skipped); + ckrbrd_hs_dr_pio_test__run_test( + test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank, TRUE, + dset_type, express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped); test_num++; break; /* end of case COL_CHUNKED */ @@ -4670,16 +3910,16 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) } /* end of switch(sstest_type) */ #if CONTIG_HS_DR_PIO_TEST__DEBUG - if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) { - HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n", tests_run, tests_skipped, total_tests); } #endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */ } } - if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) { - HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n", + if ((MAINPROCESS) && (tests_skipped > 0)) { + HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n", tests_skipped, total_tests); } @@ -4696,23 +3936,23 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type) #include "testphdf5.h" #ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ +#define PATH_MAX 512 +#endif /* !PATH_MAX */ /* global variables */ int dim0; int dim1; int chunkdim0; int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ int dxfer_coll_type = DXFER_COLLECTIVE_IO; -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ +H5E_auto2_t old_func; /* previous error handler */ +void *old_client_data; /* previous error handler arg.*/ /* other option flags */ @@ -4721,13 +3961,11 @@ void *old_client_data; /* previous error handler arg.*/ * created in one test is accessed by a different test. * filenames[0] is reserved as the file name for PARATESTFILE. 
*/ -#define NFILENAME 2 +#define NFILENAME 2 #define PARATESTFILE filenames[0] -const char *FILENAME[NFILENAME]={ - "ShapeSameTest", - NULL}; -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ +const char *FILENAME[NFILENAME] = {"ShapeSameTest", NULL}; +char *filenames[NFILENAME]; +hid_t fapl; /* file access property list */ #ifdef USE_PAUSE /* pause the process for a moment to allow debugger to attach if desired. */ @@ -4736,15 +3974,16 @@ hid_t fapl; /* file access property list */ #include <sys/types.h> #include <sys/stat.h> -void pause_proc(void) +void +pause_proc(void) { - int pid; - h5_stat_t statbuf; - char greenlight[] = "go"; - int maxloop = 10; - int loops = 0; - int time_int = 10; + int pid; + h5_stat_t statbuf; + char greenlight[] = "go"; + int maxloop = 10; + int loops = 0; + int time_int = 10; /* mpi variables */ int mpi_size, mpi_rank; @@ -4757,28 +3996,28 @@ void pause_proc(void) MPI_Get_processor_name(mpi_name, &mpi_namelen); if (MAINPROCESS) - while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){ - if (!loops++){ - printf("Proc %d (%*s, %d): to debug, attach %d\n", - mpi_rank, mpi_namelen, mpi_name, pid, pid); - } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); - fflush(stdout); - sleep(time_int); - } + while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { + if (!loops++) { + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, + pid); + } + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); + fflush(stdout); + HDsleep(time_int); + } MPI_Barrier(MPI_COMM_WORLD); } /* Use the Profile feature of MPI to call the pause_proc() */ -int MPI_Init(int *argc, char ***argv) +int +MPI_Init(int *argc, char ***argv) { int ret_code; - ret_code=PMPI_Init(argc, argv); + ret_code = PMPI_Init(argc, argv); pause_proc(); return (ret_code); } -#endif /* USE_PAUSE */ - +#endif /* USE_PAUSE */ /* * Show command usage @@ -4786,232 +4025,235 @@ int MPI_Init(int *argc, char ***argv) static void usage(void) { - printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " - "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); - printf("\t-m<n_datasets>" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n<n_groups>" - "\tset number of groups for the multiple group test\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", - ROW_FACTOR, COL_FACTOR); - printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " + "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); + HDprintf("\t-m<n_datasets>" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n<n_groups>" + "\tset number of groups for the multiple group test\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. 
Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); } - /* * parse the command line options */ static int parse_options(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables */ + int mpi_size, mpi_rank; /* mpi variables */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* setup default chunk-size. Make sure sizes are > 0 */ - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - - while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'm': ndatasets = atoi((*argv+1)+1); - if (ndatasets < 0){ - nerrors++; - return(1); - } - break; - case 'n': ngroups = atoi((*argv+1)+1); - if (ngroups < 0){ - nerrors++; - return(1); - } - break; - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; - case 'i': /* Collective MPI-IO access with independent IO */ - dxfer_coll_type = DXFER_INDEPENDENT_IO; - break; - case '2': /* Use the split-file driver with MPIO access */ - /* Can use $HDF5_METAPREFIX to define the */ - /* meta-file-prefix. */ - facc_type = FACC_MPIO | FACC_SPLIT; - break; - case 'd': /* dimensizes */ - if (--argc < 2){ - nerrors++; - return(1); - } - dim0 = atoi(*(++argv))*mpi_size; - argc--; - dim1 = atoi(*(++argv))*mpi_size; - /* set default chunkdim sizes too */ - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - break; - case 'c': /* chunk dimensions */ - if (--argc < 2){ - nerrors++; - return(1); - } - chunkdim0 = atoi(*(++argv)); - argc--; - chunkdim1 = atoi(*(++argv)); - break; - case 'h': /* print help message--return with nerrors set */ - return(1); - default: printf("Illegal option(%s)\n", *argv); - nerrors++; - return(1); - } - } + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'm': + ndatasets = atoi((*argv + 1) + 1); + if (ndatasets < 0) { + nerrors++; + return (1); + } + break; + case 'n': + ngroups = atoi((*argv + 1) + 1); + if (ngroups < 0) { + nerrors++; + return (1); + } + break; + case 'f': + if (--argc < 1) { + nerrors++; + return (1); + } + if (**(++argv) == '-') { + nerrors++; + return (1); + } + paraprefix = *argv; + break; + case 'i': /* Collective MPI-IO access with independent IO */ + dxfer_coll_type = DXFER_INDEPENDENT_IO; + break; + case '2': /* Use the split-file driver with MPIO access */ + /* Can use $HDF5_METAPREFIX to define the */ + /* meta-file-prefix. 
*/ + facc_type = FACC_MPIO | FACC_SPLIT; + break; + case 'd': /* dim sizes */ + if (--argc < 2) { + nerrors++; + return (1); + } + dim0 = atoi(*(++argv)) * mpi_size; + argc--; + dim1 = atoi(*(++argv)) * mpi_size; + /* set default chunkdim sizes too */ + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + break; + case 'c': /* chunk dimensions */ + if (--argc < 2) { + nerrors++; + return (1); + } + chunkdim0 = atoi(*(++argv)); + argc--; + chunkdim1 = atoi(*(++argv)); + break; + case 'h': /* print help message--return with nerrors set */ + return (1); + default: + HDprintf("Illegal option(%s)\n", *argv); + nerrors++; + return (1); + } + } } /*while*/ /* check validity of dimension and chunk sizes */ - if (dim0 <= 0 || dim1 <= 0){ - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return(1); + if (dim0 <= 0 || dim1 <= 0) { + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + nerrors++; + return (1); } - if (chunkdim0 <= 0 || chunkdim1 <= 0){ - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return(1); + if (chunkdim0 <= 0 || chunkdim1 <= 0) { + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + nerrors++; + return (1); } /* Make sure datasets can be divided into equal portions by the processes */ - if ((dim0 % mpi_size) || (dim1 % mpi_size)){ - if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", - dim0, dim1, mpi_size); - nerrors++; - return(1); + if ((dim0 % mpi_size) || (dim1 % mpi_size)) { + if (MAINPROCESS) + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); + nerrors++; + return (1); } /* compose the test filenames */ { - int i, n; - - n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i=0; i < n; i++) - if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) - == NULL){ - printf("h5_fixname failed\n"); - nerrors++; - return(1); - } - printf("Test filenames are:\n"); - for (i=0; i < n; i++) - printf(" %s\n", filenames[i]); + int i, n; + + n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + + for (i = 0; i < n; i++) + if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { + HDprintf("h5_fixname failed\n"); + nerrors++; + return (1); + } + HDprintf("Test filenames are:\n"); + for (i = 0; i < n; i++) + HDprintf(" %s\n", filenames[i]); } - return(0); + return (0); } - /* * Create the appropriate File access property list */ hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) { - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ + hid_t ret_pl = -1; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ /* need the rank for error checking macros */ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - ret_pl = H5Pcreate (H5P_FILE_ACCESS); + ret_pl = H5Pcreate(H5P_FILE_ACCESS); VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO){ - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - return(ret_pl); + return (ret_pl); + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); + ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); + VRFY((ret >= 0), ""); + ret = H5Pset_coll_metadata_write(ret_pl, TRUE); + VRFY((ret >= 0), ""); + return (ret_pl); } - if 
(l_facc_type == (FACC_MPIO | FACC_SPLIT)){ - hid_t mpio_pl; - - mpio_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return(ret_pl); + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + mpio_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); } /* unknown file access types */ return (ret_pl); } - -/* Shape Same test using contigous hyperslab using independent IO on contigous datasets */ +/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */ static void sscontig1(void) { contig_hs_dr_pio_test(IND_CONTIG); } -/* Shape Same test using contigous hyperslab using collective IO on contigous datasets */ +/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */ static void sscontig2(void) { contig_hs_dr_pio_test(COL_CONTIG); } -/* Shape Same test using contigous hyperslab using independent IO on chunked datasets */ +/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */ static void sscontig3(void) { contig_hs_dr_pio_test(IND_CHUNKED); } -/* Shape Same test using contigous hyperslab using collective IO on chunked datasets */ +/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */ static void sscontig4(void) { contig_hs_dr_pio_test(COL_CHUNKED); } - -/* Shape Same test using checker hyperslab using independent IO on contigous datasets */ +/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */ static void sschecker1(void) { ckrbrd_hs_dr_pio_test(IND_CONTIG); } -/* Shape Same test using checker hyperslab using collective IO on contigous datasets */ +/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */ static void sschecker2(void) { @@ -5032,10 +4274,10 @@ sschecker4(void) ckrbrd_hs_dr_pio_test(COL_CHUNKED); } - -int main(int argc, char **argv) +int +main(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables */ + int mpi_size, mpi_rank; /* mpi variables */ #ifndef H5_HAVE_WIN32_API /* Un-buffer the stdout and stderr */ @@ -5047,14 +4289,14 @@ int main(int argc, char **argv) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - dim0 = ROW_FACTOR*mpi_size; - dim1 = COL_FACTOR*mpi_size; + dim0 = ROW_FACTOR * mpi_size; + dim1 = COL_FACTOR * mpi_size; - if (MAINPROCESS){ - printf("===================================\n"); - printf("Shape Same Tests Start\n"); - printf(" express_test = %d.\n", GetTestExpress()); - printf("===================================\n"); + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("Shape Same Tests Start\n"); + HDprintf(" 
express_test = %d.\n", GetTestExpress()); + HDprintf("===================================\n"); } /* Attempt to turn off atexit post processing so that in case errors @@ -5062,54 +4304,51 @@ int main(int argc, char **argv) * hang in the atexit post processing in which it may try to make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0){ - printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); + if (H5dont_atexit() < 0) { + HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank); }; H5open(); h5_show_hostname(); + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } + /* Initialize testing framework */ TestInit(argv[0], usage, parse_options); - /* Shape Same tests using contigous hyperslab */ -#if 1 - AddTest("sscontig1", sscontig1, NULL, - "Shape Same, contigous hyperslab, ind IO, contig datasets", PARATESTFILE); - AddTest("sscontig2", sscontig2, NULL, - "Shape Same, contigous hyperslab, col IO, contig datasets", PARATESTFILE); - AddTest("sscontig3", sscontig3, NULL, - "Shape Same, contigous hyperslab, ind IO, chunked datasets", PARATESTFILE); - AddTest("sscontig4", sscontig4, NULL, - "Shape Same, contigous hyperslab, col IO, chunked datasets", PARATESTFILE); -#endif + /* Shape Same tests using contiguous hyperslab */ + AddTest("sscontig1", sscontig1, NULL, "Cntg hslab, ind IO, cntg dsets", PARATESTFILE); + AddTest("sscontig2", sscontig2, NULL, "Cntg hslab, col IO, cntg dsets", PARATESTFILE); + AddTest("sscontig3", sscontig3, NULL, "Cntg hslab, ind IO, chnk dsets", PARATESTFILE); + AddTest("sscontig4", sscontig4, NULL, "Cntg hslab, col IO, chnk dsets", PARATESTFILE); /* Shape Same tests using checker board hyperslab */ - AddTest("sschecker1", sschecker1, NULL, - "Shape Same, checker hyperslab, ind IO, contig datasets", PARATESTFILE); - AddTest("sschecker2", sschecker2, NULL, - "Shape Same, checker hyperslab, col IO, contig datasets", PARATESTFILE); - AddTest("sschecker3", sschecker3, NULL, - "Shape Same, checker hyperslab, ind IO, chunked datasets", PARATESTFILE); - AddTest("sschecker4", sschecker4, NULL, - "Shape Same, checker hyperslab, col IO, chunked datasets", PARATESTFILE); + AddTest("sschecker1", sschecker1, NULL, "Check hslab, ind IO, cntg dsets", PARATESTFILE); + AddTest("sschecker2", sschecker2, NULL, "Check hslab, col IO, cntg dsets", PARATESTFILE); + AddTest("sschecker3", sschecker3, NULL, "Check hslab, ind IO, chnk dsets", PARATESTFILE); + AddTest("sschecker4", sschecker4, NULL, "Check hslab, col IO, chnk dsets", PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); /* setup file access property list */ - fapl = H5Pcreate (H5P_FILE_ACCESS); + fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ TestParseCmdLine(argc, argv); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){ - printf("===================================\n" - " Using Independent I/O with file set view to replace collective I/O \n" - "===================================\n"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { + HDprintf("===================================\n" + " Using Independent I/O with file set view to replace collective I/O \n" + "===================================\n"); } - /* Perform requested testing */ PerformTests(); @@ -5123,7 +4362,7 @@ int 
main(int argc, char **argv) TestSummary(); /* Clean up test files */ - h5_cleanup(FILENAME, fapl); + h5_clean_files(FILENAME, fapl); nerrors += GetTestNumErrs(); @@ -5131,20 +4370,31 @@ int main(int argc, char **argv) { int temp; MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - nerrors=temp; + nerrors = temp; + } + + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors) + HDprintf("***Shape Same tests detected %d errors***\n", nerrors); + else + HDprintf("Shape Same tests finished with no errors\n"); + HDprintf("===================================\n"); } - if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); - if (nerrors) - printf("***Shape Same tests detected %d errors***\n", nerrors); - else - printf("Shape Same tests finished with no errors\n"); - printf("===================================\n"); + for (int i = 0; i < NFILENAME; i++) { + HDfree(filenames[i]); + filenames[i] = NULL; } + /* close HDF5 library */ + H5close(); + + /* Release test infrastructure */ + TestShutdown(); + MPI_Finalize(); /* cannot just return (nerrors) because exit code is limited to 1 byte */ - return(nerrors!=0); + return (nerrors != 0); } diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c index 6e233a9..aab2b59 100644 --- a/testpar/t_span_tree.c +++ b/testpar/t_span_tree.c @@ -1,17 +1,14 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -25,38 +22,35 @@ 2) We will read two datasets with the same hyperslab selection settings, 1. independent read to read independent output, independent read to read collective output, - Compare the result, - If the result is the same, then collective write succeeds. + Compare the result, + If the result is the same, then collective write succeeds. 2. collective read to read independent output, independent read to read independent output, - Compare the result, - If the result is the same, then collective read succeeds. + Compare the result, + If the result is the same, then collective read succeeds.
*/ -#include "hdf5.h" #include "H5private.h" #include "testphdf5.h" +#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0 static void coll_write_test(int chunk_factor); -static void coll_read_test(int chunk_factor); - +static void coll_read_test(void); /*------------------------------------------------------------------------- - * Function: coll_irregular_cont_write - * - * Purpose: Wrapper to test the collectively irregular hyperslab write in - contiguous storage + * Function: coll_irregular_cont_write * - * Return: Success: 0 + * Purpose: Wrapper to test the collectively irregular hyperslab write in + * contiguous storage * - * Failure: -1 + * Return: Success: 0 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Failure: -1 * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -64,26 +58,21 @@ void coll_irregular_cont_write(void) { - coll_write_test(0); - + coll_write_test(0); } - - /*------------------------------------------------------------------------- - * Function: coll_irregular_cont_read - * - * Purpose: Wrapper to test the collectively irregular hyperslab read in - contiguous storage + * Function: coll_irregular_cont_read * - * Return: Success: 0 + * Purpose: Wrapper to test the collectively irregular hyperslab read in + * contiguous storage * - * Failure: -1 + * Return: Success: 0 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Failure: -1 * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -91,25 +80,21 @@ void coll_irregular_cont_read(void) { - coll_read_test(0); - + coll_read_test(); } - /*------------------------------------------------------------------------- - * Function: coll_irregular_simple_chunk_write - * - * Purpose: Wrapper to test the collectively irregular hyperslab write in - chunk storage(1 chunk) + * Function: coll_irregular_simple_chunk_write * - * Return: Success: 0 + * Purpose: Wrapper to test the collectively irregular hyperslab write in + * chunk storage(1 chunk) * - * Failure: -1 + * Return: Success: 0 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Failure: -1 * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -117,26 +102,21 @@ void coll_irregular_simple_chunk_write(void) { - coll_write_test(1); - + coll_write_test(1); } - - /*------------------------------------------------------------------------- - * Function: coll_irregular_simple_chunk_read + * Function: coll_irregular_simple_chunk_read * - * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk - storage(1 chunk) + * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk + * storage(1 chunk) * - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * Dec 2nd, 2004 - * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -144,24 +124,21 @@ void coll_irregular_simple_chunk_read(void) { - coll_read_test(1); - + coll_read_test(); } /*------------------------------------------------------------------------- - * Function: coll_irregular_complex_chunk_write + * Function: coll_irregular_complex_chunk_write * - * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk - storage(4 chunks) + * Purpose: Wrapper to test the 
collectively irregular hyperslab write in chunk + * storage(4 chunks) * - * Return: Success: 0 + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * Dec 2nd, 2004 - * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -169,26 +146,21 @@ void coll_irregular_complex_chunk_write(void) { - coll_write_test(4); - + coll_write_test(4); } - - /*------------------------------------------------------------------------- - * Function: coll_irregular_complex_chunk_read - * - * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk - storage(1 chunk) + * Function: coll_irregular_complex_chunk_read * - * Return: Success: 0 + * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk + * storage(1 chunk) * - * Failure: -1 + * Return: Success: 0 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Failure: -1 * - * Modifications: + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ @@ -196,834 +168,799 @@ void coll_irregular_complex_chunk_read(void) { - coll_read_test(4); - + coll_read_test(); } - /*------------------------------------------------------------------------- - * Function: coll_write_test + * Function: coll_write_test * - * Purpose: To test the collectively irregular hyperslab write in chunk - storage + * Purpose: To test the collectively irregular hyperslab write in chunk + * storage * Input: number of chunks on each dimension - if number is equal to 0, contiguous storage - * Return: Success: 0 - * - * Failure: -1 + * if number is equal to 0, contiguous storage + * Return: Success: 0 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Failure: -1 * - * Modifications: Oct 18th, 2005 + * Programmer: Unknown + * Dec 2nd, 2004 * *------------------------------------------------------------------------- */ -void coll_write_test(int chunk_factor) +void +coll_write_test(int chunk_factor) { - const char *filename; - hid_t facc_plist,dxfer_plist,dcrt_plist; - hid_t file, datasetc,dataseti; /* File and dataset identifiers */ - hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */ + const char *filename; + hid_t facc_plist, dxfer_plist, dcrt_plist; + hid_t file, datasetc, dataseti; /* File and dataset identifiers */ + hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */ + + hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */ + hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */ + hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we + * read selection from the dataset on the disk + */ - hsize_t mdim1[1],fsdim[2],mdim[2]; + hsize_t start[2]; /* Start of hyperslab */ + hsize_t stride[2]; /* Stride of hyperslab */ + hsize_t count[2]; /* Block count */ + hsize_t block[2]; /* Block sizes */ + hsize_t chunk_dims[2]; -#if 0 - hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset - (in memory) */ - hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2}; /* Dimension sizes of the dataset - (on disk) */ + herr_t ret; + int i; + int fillvalue = 0; /* Fill value for the dataset */ - hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the - dataset in memory when we - read selection from the - dataset on the disk */ -#endif + int *matrix_out = NULL; + int *matrix_out1 = NULL; /* Buffer to read from the dataset */ + int *vector = NULL; - hsize_t start[2]; /* Start of hyperslab */ - 
hsize_t stride[2]; /* Stride of hyperslab */ - hsize_t count[2]; /* Block count */ - hsize_t block[2]; /* Block sizes */ - hsize_t chunk_dims[2]; - - herr_t ret; - unsigned i; - int fillvalue = 0; /* Fill value for the dataset */ - -#if 0 - int matrix_out[MSPACE_DIM1][MSPACE_DIM2]; - int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the - dataset */ - int vector[MSPACE1_DIM]; -#endif + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + /*set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Obtain file name */ + filename = GetTestParameters(); + + /* + * Buffers' initialization. + */ + + mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size); + mdim[0] = MSPACE_DIM1; + mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); + fsdim[0] = FSPACE_DIM1; + fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size); + + vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); + matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); + matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size); + + HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size); + vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1; + for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++) + H5_CHECKED_ASSIGN(vector[i], int, i, unsigned); + + /* Grab file access property list */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + /* + * Create a file. + */ + file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist); + VRFY((file >= 0), "H5Fcreate succeeded"); + + /* + * Create property list for a dataset and set up fill values. + */ + dcrt_plist = H5Pcreate(H5P_DATASET_CREATE); + VRFY((dcrt_plist >= 0), ""); + + ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue); + VRFY((ret >= 0), "Fill value creation property list succeeded"); + + if (chunk_factor != 0) { + chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor; + chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor; + ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims); + VRFY((ret >= 0), "chunk creation property list succeeded"); + } + + /* + * + * Create dataspace for the first dataset in the disk. + * dim1 = 9 + * dim2 = 3600 + * + * + */ + fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL); + VRFY((fspaceid >= 0), "file dataspace created succeeded"); + + /* + * Create dataset in the file. Notice that creation + * property list dcrt_plist is used. 
+ */ + datasetc = + H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); + VRFY((datasetc >= 0), "dataset created succeeded"); + + dataseti = + H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); + VRFY((dataseti >= 0), "dataset created succeeded"); + + /* The First selection for FILE + * + * block (3,2) + * stride(4,3) + * count (1,768/mpi_size) + * start (0,1+768*3*mpi_rank/mpi_size) + * + */ + + start[0] = FHSTART0; + start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1); + stride[0] = FHSTRIDE0; + stride[1] = FHSTRIDE1; + count[0] = FHCOUNT0; + count[1] = FHCOUNT1; + block[0] = FHBLOCK0; + block[1] = FHBLOCK1; + + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE + * + * block (3,768) + * stride (1,1) + * count (1,1) + * start (4,768*mpi_rank/mpi_size) + * + */ + + start[0] = SHSTART0; + start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank); + stride[0] = SHSTRIDE0; + stride[1] = SHSTRIDE1; + count[0] = SHCOUNT0; + count[1] = SHCOUNT1; + block[0] = SHBLOCK0; + block[1] = SHBLOCK1; + + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create dataspace for the first dataset in the memory + * dim1 = 27000 + * + */ + mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL); + VRFY((mspaceid1 >= 0), "memory dataspace created succeeded"); + + /* + * Memory space is 1-D, this is a good test to check + * whether a span-tree derived datatype needs to be built. + * block 1 + * stride 1 + * count 6912/mpi_size + * start 1 + * + */ + start[0] = MHSTART0; + stride[0] = MHSTRIDE0; + count[0] = MHCOUNT0; + block[0] = MHBLOCK0; + + ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* independent write */ + ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector); + VRFY((ret >= 0), "dataset independent write succeed"); + + dxfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxfer_plist >= 0), ""); + + ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "MPIO data transfer property list succeed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* collective write */ + ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector); + VRFY((ret >= 0), "dataset collective write succeed"); + + ret = H5Sclose(mspaceid1); + VRFY((ret >= 0), ""); + + ret = H5Sclose(fspaceid); + VRFY((ret >= 0), ""); + + /* + * Close dataset. + */ + ret = H5Dclose(datasetc); + VRFY((ret >= 0), ""); + + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); + + /* + * Close the file. 
+ */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); + /* + * Close property list + */ + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + ret = H5Pclose(dxfer_plist); + VRFY((ret >= 0), ""); + ret = H5Pclose(dcrt_plist); + VRFY((ret >= 0), ""); - int *matrix_out, *matrix_out1, *vector; - - int mpi_size,mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - /*set up MPI parameters */ - MPI_Comm_size(comm,&mpi_size); - MPI_Comm_rank(comm,&mpi_rank); - - /* Obtain file name */ - filename = GetTestParameters(); - - /* - * Buffers' initialization. - */ - - mdim1[0] = MSPACE1_DIM *mpi_size; - mdim[0] = MSPACE_DIM1; - mdim[1] = MSPACE_DIM2*mpi_size; - fsdim[0] = FSPACE_DIM1; - fsdim[1] = FSPACE_DIM2*mpi_size; - - vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size); - matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size); - matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size); - - HDmemset(vector,0,sizeof(int)*mdim1[0]*mpi_size); - vector[0] = vector[MSPACE1_DIM*mpi_size - 1] = -1; - for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) vector[i] = i; - - /* Grab file access property list */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0),""); - - /* - * Create a file. - */ - file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist); - VRFY((file >= 0),"H5Fcreate succeeded"); - - /* - * Create property list for a dataset and set up fill values. - */ - dcrt_plist = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcrt_plist >= 0),""); - - ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue); - VRFY((ret >= 0),"Fill value creation property list succeeded"); - - if(chunk_factor != 0) { - chunk_dims[0] = fsdim[0] / chunk_factor; - chunk_dims[1] = fsdim[1] / chunk_factor; - ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); - } - - /* - * - * Create dataspace for the first dataset in the disk. - * dim1 = 9 - * dim2 = 3600 - * - * - */ - fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL); - VRFY((fspaceid >= 0),"file dataspace created succeeded"); - - /* - * Create dataset in the file. Notice that creation - * property list dcrt_plist is used. 
- */ - datasetc = H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); - VRFY((datasetc >= 0),"dataset created succeeded"); - - dataseti = H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT); - VRFY((dataseti >= 0),"dataset created succeeded"); - - /* The First selection for FILE - * - * block (3,2) - * stride(4,3) - * count (1,768/mpi_size) - * start (0,1+768*3*mpi_rank/mpi_size) - * - */ - - start[0] = FHSTART0; - start[1] = FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1; - stride[0] = FHSTRIDE0; - stride[1] = FHSTRIDE1; - count[0] = FHCOUNT0; - count[1] = FHCOUNT1; - block[0] = FHBLOCK0; - block[1] = FHBLOCK1; - - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* The Second selection for FILE - * - * block (3,768) - * stride (1,1) - * count (1,1) - * start (4,768*mpi_rank/mpi_size) - * - */ - - start[0] = SHSTART0; - start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank; - stride[0] = SHSTRIDE0; - stride[1] = SHSTRIDE1; - count[0] = SHCOUNT0; - count[1] = SHCOUNT1; - block[0] = SHBLOCK0; - block[1] = SHBLOCK1; - - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* - * Create dataspace for the first dataset in the memory - * dim1 = 27000 - * - */ - mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL); - VRFY((mspaceid1 >= 0),"memory dataspace created succeeded"); - - /* - * Memory space is 1-D, this is a good test to check - * whether a span-tree derived datatype needs to be built. - * block 1 - * stride 1 - * count 6912/mpi_size - * start 1 - * - */ - start[0] = MHSTART0; - stride[0] = MHSTRIDE0; - count[0] = MHCOUNT0; - block[0] = MHBLOCK0; - - ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* independent write */ - ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector); - VRFY((ret >= 0),"dataset independent write succeed"); - - dxfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxfer_plist >= 0),""); - - ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0),"MPIO data transfer property list succeed"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* collective write */ - ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector); - VRFY((ret >= 0),"dataset collective write succeed"); - - ret = H5Sclose(mspaceid1); - VRFY((ret >= 0),""); - - ret = H5Sclose(fspaceid); - VRFY((ret >= 0),""); - - /* - * Close dataset. - */ - ret = H5Dclose(datasetc); - VRFY((ret >= 0),""); - - ret = H5Dclose(dataseti); - VRFY((ret >= 0),""); - - /* - * Close the file. - */ - ret = H5Fclose(file); - VRFY((ret >= 0),""); - /* - * Close property list - */ - - ret = H5Pclose(facc_plist); - VRFY((ret >= 0),""); - ret = H5Pclose(dxfer_plist); - VRFY((ret >= 0),""); - ret = H5Pclose(dcrt_plist); - VRFY((ret >= 0),""); - - /* - * Open the file. 
- */ - - /*** - - For testing collective hyperslab selection write - In this test, we are using independent read to check - the correctedness of collective write compared with - independent write, - - In order to throughly test this feature, we choose - a different selection set for reading the data out. - - - ***/ - - /* Obtain file access property list with MPI-IO driver */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0),""); - - file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); - VRFY((file >= 0),"H5Fopen succeeded"); - - /* - * Open the dataset. - */ - datasetc = H5Dopen2(file,"collect_write", H5P_DEFAULT); - VRFY((datasetc >= 0),"H5Dopen2 succeeded"); - - dataseti = H5Dopen2(file,"independ_write", H5P_DEFAULT); - VRFY((dataseti >= 0),"H5Dopen2 succeeded"); - - /* - * Get dataspace of the open dataset. - */ - fspaceid = H5Dget_space(datasetc); - VRFY((fspaceid >= 0),"file dataspace obtained succeeded"); - - fspaceid1 = H5Dget_space(dataseti); - VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded"); - - - /* The First selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (1,2+768*mpi_rank/mpi_size) - * - */ - start[0] = RFFHSTART0; - start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1; - block[0] = RFFHBLOCK0; - block[1] = RFFHBLOCK1; - stride[0] = RFFHSTRIDE0; - stride[1] = RFFHSTRIDE1; - count[0] = RFFHCOUNT0; - count[1] = RFFHCOUNT1; - - - /* The first selection of the dataset generated by collective write */ - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* The first selection of the dataset generated by independent write */ - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* The Second selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,1536/mpi_size) - * start (2,4+1536*mpi_rank/mpi_size) - * - */ - - start[0] = RFSHSTART0; - start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank; - block[0] = RFSHBLOCK0; - block[1] = RFSHBLOCK1; - stride[0] = RFSHSTRIDE0; - stride[1] = RFSHSTRIDE0; - count[0] = RFSHCOUNT0; - count[1] = RFSHCOUNT1; - - /* The second selection of the dataset generated by collective write */ - ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* The second selection of the dataset generated by independent write */ - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* - * Create memory dataspace. - * rank = 2 - * mdim1 = 9 - * mdim2 = 3600 - * - */ - mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); - - /* - * Select two hyperslabs in memory. Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. - * The first selection - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (0,768*mpi_rank/mpi_size) - * - */ - - - start[0] = RMFHSTART0; - start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1; - block[0] = RMFHBLOCK0; - block[1] = RMFHBLOCK1; - stride[0] = RMFHSTRIDE0; - stride[1] = RMFHSTRIDE1; - count[0] = RMFHCOUNT0; - count[1] = RMFHCOUNT1; - - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* - * Select two hyperslabs in memory. 
Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. - * The second selection - * block (1,1) - * stride(1,1) - * count (3,1536/mpi_size) - * start (1,2+1536*mpi_rank/mpi_size) - * - */ - start[0] = RMSHSTART0; - start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1; - block[0] = RMSHBLOCK0; - block[1] = RMSHBLOCK1; - stride[0] = RMSHSTRIDE0; - stride[1] = RMSHSTRIDE1; - count[0] = RMSHCOUNT0; - count[1] = RMSHCOUNT1; - - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* - * Initialize data buffer. - */ - - HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - /* - * Read data back to the buffer matrix_out. - */ - - ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, - H5P_DEFAULT, matrix_out); - VRFY((ret >= 0),"H5D independent read succeed"); - - - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, - H5P_DEFAULT, matrix_out1); - VRFY((ret >= 0),"H5D independent read succeed"); - - ret = 0; - - for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){ - if(matrix_out[i]!=matrix_out1[i]) ret = -1; - if(ret < 0) break; + /* + * Open the file. + */ + + /*** + + For testing collective hyperslab selection write + In this test, we are using independent read to check + the correctedness of collective write compared with + independent write, + + In order to thoroughly test this feature, we choose + a different selection set for reading the data out. + + + ***/ + + /* Obtain file access property list with MPI-IO driver */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); + VRFY((file >= 0), "H5Fopen succeeded"); + + /* + * Open the dataset. + */ + datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT); + VRFY((datasetc >= 0), "H5Dopen2 succeeded"); + + dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); + VRFY((dataseti >= 0), "H5Dopen2 succeeded"); + + /* + * Get dataspace of the open dataset. 
+ */ + fspaceid = H5Dget_space(datasetc); + VRFY((fspaceid >= 0), "file dataspace obtained succeeded"); + + fspaceid1 = H5Dget_space(dataseti); + VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); + + /* The First selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (1,2+768*mpi_rank/mpi_size) + * + */ + start[0] = RFFHSTART0; + start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); + block[0] = RFFHBLOCK0; + block[1] = RFFHBLOCK1; + stride[0] = RFFHSTRIDE0; + stride[1] = RFFHSTRIDE1; + count[0] = RFFHCOUNT0; + count[1] = RFFHCOUNT1; + + /* The first selection of the dataset generated by collective write */ + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The first selection of the dataset generated by independent write */ + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,1536/mpi_size) + * start (2,4+1536*mpi_rank/mpi_size) + * + */ + + start[0] = RFSHSTART0; + start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); + block[0] = RFSHBLOCK0; + block[1] = RFSHBLOCK1; + stride[0] = RFSHSTRIDE0; + stride[1] = RFSHSTRIDE0; + count[0] = RFSHCOUNT0; + count[1] = RFSHCOUNT1; + + /* The second selection of the dataset generated by collective write */ + ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The second selection of the dataset generated by independent write */ + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create memory dataspace. + * rank = 2 + * mdim1 = 9 + * mdim2 = 3600 + * + */ + mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. + * The first selection + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (0,768*mpi_rank/mpi_size) + * + */ + + start[0] = RMFHSTART0; + start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); + block[0] = RMFHBLOCK0; + block[1] = RMFHBLOCK1; + stride[0] = RMFHSTRIDE0; + stride[1] = RMFHSTRIDE1; + count[0] = RMFHCOUNT0; + count[1] = RMFHCOUNT1; + + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. + * The second selection + * block (1,1) + * stride(1,1) + * count (3,1536/mpi_size) + * start (1,2+1536*mpi_rank/mpi_size) + * + */ + start[0] = RMSHSTART0; + start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); + block[0] = RMSHBLOCK0; + block[1] = RMSHBLOCK1; + stride[0] = RMSHSTRIDE0; + stride[1] = RMSHSTRIDE1; + count[0] = RMSHCOUNT0; + count[1] = RMSHCOUNT1; + + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Initialize data buffer. 
+ */ + + HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + /* + * Read data back to the buffer matrix_out. + */ + + ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = 0; + + for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { + if (matrix_out[i] != matrix_out1[i]) + ret = -1; + if (ret < 0) + break; } - VRFY((ret >= 0),"H5D irregular collective write succeed"); + VRFY((ret >= 0), "H5D irregular collective write succeed"); - /* - * Close memory file and memory dataspaces. - */ - ret = H5Sclose(mspaceid); - VRFY((ret >= 0),""); - ret = H5Sclose(fspaceid); - VRFY((ret >= 0),""); + /* + * Close memory file and memory dataspaces. + */ + ret = H5Sclose(mspaceid); + VRFY((ret >= 0), ""); + ret = H5Sclose(fspaceid); + VRFY((ret >= 0), ""); - /* - * Close dataset. - */ - ret = H5Dclose(dataseti); - VRFY((ret >= 0),""); + /* + * Close dataset. + */ + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); - ret = H5Dclose(datasetc); - VRFY((ret >= 0),""); + ret = H5Dclose(datasetc); + VRFY((ret >= 0), ""); - /* - * Close property list - */ + /* + * Close property list + */ - ret = H5Pclose(facc_plist); - VRFY((ret >= 0),""); + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + /* + * Close the file. + */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); - /* - * Close the file. - */ - ret = H5Fclose(file); - VRFY((ret >= 0),""); + if (vector) + HDfree(vector); + if (matrix_out) + HDfree(matrix_out); + if (matrix_out1) + HDfree(matrix_out1); - return ; + return; } /*------------------------------------------------------------------------- - * Function: coll_read_test + * Function: coll_read_test * - * Purpose: To test the collectively irregular hyperslab read in chunk - storage + * Purpose: To test the collectively irregular hyperslab read in chunk + * storage * Input: number of chunks on each dimension - if number is equal to 0, contiguous storage - * Return: Success: 0 + * if number is equal to 0, contiguous storage + * Return: Success: 0 * - * Failure: -1 + * Failure: -1 * - * Programmer: Unknown - * Dec 2nd, 2004 + * Programmer: Unknown + * Dec 2nd, 2004 * - * Modifications: Oct 18th, 2005 - * Note: This test must be used with the correpsonding - coll_write_test. 
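All of the wrappers above funnel into the same transfer property list setup. As a reference point, here is a minimal sketch of that setup (the helper name is invented; the calls themselves are real parallel HDF5 API, and error checking is omitted for brevity):

#include "hdf5.h"

static hid_t
make_collective_dxpl(int use_individual_io)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    /* Request collective MPI-IO transfer mode */
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* Optionally perform the actual I/O independently while staying
     * on the collective code path -- this is what the test's
     * DXFER_INDEPENDENT_IO branch exercises */
    if (use_individual_io)
        H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    return dxpl;
}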
*------------------------------------------------------------------------- */ static void -coll_read_test(int chunk_factor) +coll_read_test(void) { - const char *filename; - hid_t facc_plist,dxfer_plist; - hid_t file, dataseti; /* File and dataset identifiers */ - hid_t mspaceid, fspaceid1; /* Dataspace identifiers */ + const char *filename; + hid_t facc_plist, dxfer_plist; + hid_t file, dataseti; /* File and dataset identifiers */ + hid_t mspaceid, fspaceid1; /* Dataspace identifiers */ + /* Dimension sizes of the dataset (on disk) */ + hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we + * read selection from the dataset on the disk + */ - /* Dimension sizes of the dataset (on disk) */ -#if 0 - hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the - dataset in memory when we - read selection from the - dataset on the disk */ + hsize_t start[2]; /* Start of hyperslab */ + hsize_t stride[2]; /* Stride of hyperslab */ + hsize_t count[2]; /* Block count */ + hsize_t block[2]; /* Block sizes */ + herr_t ret; -#endif - hsize_t mdim[2]; - hsize_t start[2]; /* Start of hyperslab */ - hsize_t stride[2]; /* Stride of hyperslab */ - hsize_t count[2]; /* Block count */ - hsize_t block[2]; /* Block sizes */ - herr_t ret; - - unsigned i; - - int *matrix_out; - int *matrix_out1; -#if 0 - int matrix_out[MSPACE_DIM1][MSPACE_DIM2]; - int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the - dataset */ + int i; -#endif - int mpi_size,mpi_rank; - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - - /*set up MPI parameters */ - MPI_Comm_size(comm,&mpi_size); - MPI_Comm_rank(comm,&mpi_rank); - - - /* Obtain file name */ - filename = GetTestParameters(); - - - /* Initialize the buffer */ - - mdim[0] = MSPACE_DIM1; - mdim[1] = MSPACE_DIM2*mpi_size; - matrix_out =(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - matrix_out1=(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - - /*** For testing collective hyperslab selection read ***/ - - /* Obtain file access property list */ - facc_plist = create_faccess_plist(comm, info, facc_type); - VRFY((facc_plist >= 0),""); - - /* - * Open the file. - */ - file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); - VRFY((file >= 0),"H5Fopen succeeded"); - - /* - * Open the dataset. - */ - dataseti = H5Dopen2(file,"independ_write", H5P_DEFAULT); - VRFY((dataseti >= 0),"H5Dopen2 succeeded"); - - /* - * Get dataspace of the open dataset. 
- */ - fspaceid1 = H5Dget_space(dataseti); - VRFY((fspaceid1 >= 0),"file dataspace obtained succeeded"); - - /* The First selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (1,2+768*mpi_rank/mpi_size) - * - */ - start[0] = RFFHSTART0; - start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1; - block[0] = RFFHBLOCK0; - block[1] = RFFHBLOCK1; - stride[0] = RFFHSTRIDE0; - stride[1] = RFFHSTRIDE1; - count[0] = RFFHCOUNT0; - count[1] = RFFHCOUNT1; - - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* The Second selection for FILE to read - * - * block (1,1) - * stride(1.1) - * count (3,1536/mpi_size) - * start (2,4+1536*mpi_rank/mpi_size) - * - */ - start[0] = RFSHSTART0; - start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank; - block[0] = RFSHBLOCK0; - block[1] = RFSHBLOCK1; - stride[0] = RFSHSTRIDE0; - stride[1] = RFSHSTRIDE0; - count[0] = RFSHCOUNT0; - count[1] = RFSHCOUNT1; - - ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - - /* - * Create memory dataspace. - */ - mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); - - /* - * Select two hyperslabs in memory. Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace. - * Only the starting point is different. - * The first selection - * block (1,1) - * stride(1.1) - * count (3,768/mpi_size) - * start (0,768*mpi_rank/mpi_size) - * - */ - - start[0] = RMFHSTART0; - start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1; - block[0] = RMFHBLOCK0; - block[1] = RMFHBLOCK1; - stride[0] = RMFHSTRIDE0; - stride[1] = RMFHSTRIDE1; - count[0] = RMFHCOUNT0; - count[1] = RMFHCOUNT1; - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - /* - * Select two hyperslabs in memory. Hyperslabs has the same - * size and shape as the selected hyperslabs for the file dataspace - * Only the starting point is different. - * The second selection - * block (1,1) - * stride(1,1) - * count (3,1536/mpi_size) - * start (1,2+1536*mpi_rank/mpi_size) - * - */ - start[0] = RMSHSTART0; - start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1; - block[0] = RMSHBLOCK0; - block[1] = RMSHBLOCK1; - stride[0] = RMSHSTRIDE0; - stride[1] = RMSHSTRIDE1; - count[0] = RMSHCOUNT0; - count[1] = RMSHCOUNT1; - ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); - VRFY((ret >= 0),"hyperslab selection succeeded"); - - - /* - * Initialize data buffer. - */ - - HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size); - - /* - * Read data back to the buffer matrix_out. 
- */ - - dxfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxfer_plist >= 0),""); - - ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0),"MPIO data transfer property list succeed"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* Collective read */ - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, - dxfer_plist, matrix_out); - VRFY((ret >= 0),"H5D collecive read succeed"); - - ret = H5Pclose(dxfer_plist); - VRFY((ret >= 0),""); - - /* Independent read */ - ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, - H5P_DEFAULT, matrix_out1); - VRFY((ret >= 0),"H5D independent read succeed"); - - ret = 0; - for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){ - if(matrix_out[i]!=matrix_out1[i])ret = -1; - if(ret < 0) break; - } - VRFY((ret >= 0),"H5D contiguous irregular collective read succeed"); - - /* - * Close memory file and memory dataspaces. - */ - ret = H5Sclose(mspaceid); - VRFY((ret >= 0),""); - ret = H5Sclose(fspaceid1); - VRFY((ret >= 0),""); - - /* - * Close dataset. - */ - ret = H5Dclose(dataseti); - VRFY((ret >= 0),""); - - /* - * Close property list - */ - ret = H5Pclose(facc_plist); - VRFY((ret >= 0),""); - - - /* - * Close the file. - */ - ret = H5Fclose(file); - VRFY((ret >= 0),""); - - return ; -} + int *matrix_out; + int *matrix_out1; /* Buffer to read from the dataset */ + + int mpi_size, mpi_rank; + + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Info info = MPI_INFO_NULL; + + /*set up MPI parameters */ + MPI_Comm_size(comm, &mpi_size); + MPI_Comm_rank(comm, &mpi_rank); + + /* Obtain file name */ + filename = GetTestParameters(); + + /* Initialize the buffer */ + + mdim[0] = MSPACE_DIM1; + mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size); + matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + + /*** For testing collective hyperslab selection read ***/ + + /* Obtain file access property list */ + facc_plist = create_faccess_plist(comm, info, facc_type); + VRFY((facc_plist >= 0), ""); + + /* + * Open the file. + */ + file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist); + VRFY((file >= 0), "H5Fopen succeeded"); + + /* + * Open the dataset. + */ + dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT); + VRFY((dataseti >= 0), "H5Dopen2 succeeded"); + + /* + * Get dataspace of the open dataset. 
+ */ + fspaceid1 = H5Dget_space(dataseti); + VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded"); + + /* The First selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (1,2+768*mpi_rank/mpi_size) + * + */ + start[0] = RFFHSTART0; + start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1); + block[0] = RFFHBLOCK0; + block[1] = RFFHBLOCK1; + stride[0] = RFFHSTRIDE0; + stride[1] = RFFHSTRIDE1; + count[0] = RFFHCOUNT0; + count[1] = RFFHCOUNT1; + + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* The Second selection for FILE to read + * + * block (1,1) + * stride(1.1) + * count (3,1536/mpi_size) + * start (2,4+1536*mpi_rank/mpi_size) + * + */ + start[0] = RFSHSTART0; + start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank); + block[0] = RFSHBLOCK0; + block[1] = RFSHBLOCK1; + stride[0] = RFSHSTRIDE0; + stride[1] = RFSHSTRIDE0; + count[0] = RFSHCOUNT0; + count[1] = RFSHCOUNT1; + + ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Create memory dataspace. + */ + mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace. + * Only the starting point is different. + * The first selection + * block (1,1) + * stride(1.1) + * count (3,768/mpi_size) + * start (0,768*mpi_rank/mpi_size) + * + */ + + start[0] = RMFHSTART0; + start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1); + block[0] = RMFHBLOCK0; + block[1] = RMFHBLOCK1; + stride[0] = RMFHSTRIDE0; + stride[1] = RMFHSTRIDE1; + count[0] = RMFHCOUNT0; + count[1] = RMFHCOUNT1; + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Select two hyperslabs in memory. Hyperslabs has the same + * size and shape as the selected hyperslabs for the file dataspace + * Only the starting point is different. + * The second selection + * block (1,1) + * stride(1,1) + * count (3,1536/mpi_size) + * start (1,2+1536*mpi_rank/mpi_size) + * + */ + start[0] = RMSHSTART0; + start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1); + block[0] = RMSHBLOCK0; + block[1] = RMSHBLOCK1; + stride[0] = RMSHSTRIDE0; + stride[1] = RMSHSTRIDE1; + count[0] = RMSHCOUNT0; + count[1] = RMSHCOUNT1; + ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "hyperslab selection succeeded"); + + /* + * Initialize data buffer. + */ + + HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size); + + /* + * Read data back to the buffer matrix_out. 
+ */ + + dxfer_plist = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxfer_plist >= 0), ""); + ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE); + VRFY((ret >= 0), "MPIO data transfer property list succeed"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO) { + ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "set independent IO collectively succeeded"); + } + + /* Collective read */ + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out); + VRFY((ret >= 0), "H5D collecive read succeed"); + + ret = H5Pclose(dxfer_plist); + VRFY((ret >= 0), ""); + + /* Independent read */ + ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1); + VRFY((ret >= 0), "H5D independent read succeed"); + + ret = 0; + for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) { + if (matrix_out[i] != matrix_out1[i]) + ret = -1; + if (ret < 0) + break; + } + VRFY((ret >= 0), "H5D contiguous irregular collective read succeed"); + + /* + * Free read buffers. + */ + HDfree(matrix_out); + HDfree(matrix_out1); + + /* + * Close memory file and memory dataspaces. + */ + ret = H5Sclose(mspaceid); + VRFY((ret >= 0), ""); + ret = H5Sclose(fspaceid1); + VRFY((ret >= 0), ""); + + /* + * Close dataset. + */ + ret = H5Dclose(dataseti); + VRFY((ret >= 0), ""); + + /* + * Close property list + */ + ret = H5Pclose(facc_plist); + VRFY((ret >= 0), ""); + + /* + * Close the file. + */ + ret = H5Fclose(file); + VRFY((ret >= 0), ""); + + return; +} /**************************************************************** ** -** lower_dim_size_comp_test__select_checker_board(): +** lower_dim_size_comp_test__select_checker_board(): ** -** Given a data space of tgt_rank, and dimensions: +** Given a dataspace of tgt_rank, and dimensions: ** -** (mpi_size + 1), edge_size, ... , edge_size +** (mpi_size + 1), edge_size, ... , edge_size ** -** edge_size, and a checker_edge_size, select a checker -** board selection of a sel_rank (sel_rank < tgt_rank) -** dimensional slice through the data space parallel to the -** sel_rank fastest changing indicies, with origin (in the -** higher indicies) as indicated by the start array. +** edge_size, and a checker_edge_size, select a checker +** board selection of a sel_rank (sel_rank < tgt_rank) +** dimensional slice through the dataspace parallel to the +** sel_rank fastest changing indices, with origin (in the +** higher indices) as indicated by the start array. ** -** Note that this function, is hard coded to presume a -** maximum data space rank of 5. +** Note that this function, is hard coded to presume a +** maximum dataspace rank of 5. ** -** While this maximum is declared as a constant, increasing -** it will require extensive coding in addition to changing +** While this maximum is declared as a constant, increasing +** it will require extensive coding in addition to changing ** the value of the constant. 
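The 5-D selection machinery below is easier to see in two dimensions first. The following sketch (illustrative only, not part of the test; it assumes edge <= dims[i]) mirrors the SET/OR/AND pattern this function uses: select the two interleaved families of checkers, deliberately over-selecting past the extent, then clip back with an AND:

#include "hdf5.h"

static void
select_checkerboard_2d(hid_t sid, hsize_t edge, const hsize_t dims[2])
{
    hsize_t start[2], stride[2], count[2], block[2];
    int     i, j;

    stride[0] = stride[1] = 2 * edge; /* one checker plus one gap */
    block[0] = block[1] = edge;       /* a single checker */

    /* The "black" squares are the blocks whose (row, column) block
     * indices sum to an even number; they form two families, offset
     * by (0, 0) and (edge, edge) respectively. */
    for (i = 0; i <= 1; i++)
        for (j = 0; j <= 1; j++) {
            if (((i + j) % 2) != 0)
                continue;
            start[0] = (hsize_t)i * edge;
            start[1] = (hsize_t)j * edge;
            /* ceil((dims - start) / stride): may select past the
             * extent; the AND below clips the partial checkers */
            count[0] = (dims[0] - start[0] + 2 * edge - 1) / (2 * edge);
            count[1] = (dims[1] - start[1] + 2 * edge - 1) / (2 * edge);
            H5Sselect_hyperslab(sid, (i == 0 && j == 0) ? H5S_SELECT_SET : H5S_SELECT_OR, start,
                                stride, count, block);
        }

    /* Intersect with the full extent to discard out-of-bounds parts */
    start[0] = start[1] = 0;
    count[0] = count[1] = 1;
    H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, dims, count, dims);
}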
** -** JRM -- 11/11/09 +** JRM -- 11/11/09 ** ****************************************************************/ -#define LDSCT_DS_RANK 5 +#define LDSCT_DS_RANK 5 +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG #define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0 +#endif #define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0 static void -lower_dim_size_comp_test__select_checker_board( - const int mpi_rank, - const hid_t tgt_sid, - const int tgt_rank, - const hsize_t dims[LDSCT_DS_RANK], - const int checker_edge_size, - const int sel_rank, - hsize_t sel_start[]) +lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank, + const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size, + const int sel_rank, hsize_t sel_start[]) { -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - const char * fcnName = - "lower_dim_size_comp_test__select_checker_board():"; -#endif - hbool_t first_selection = TRUE; - int i, j, k, l, m; - int ds_offset; - int sel_offset; - const int test_max_rank = LDSCT_DS_RANK; /* must update code if */ - /* this changes */ - hsize_t base_count; - hsize_t offset_count; - hsize_t start[LDSCT_DS_RANK]; - hsize_t stride[LDSCT_DS_RANK]; - hsize_t count[LDSCT_DS_RANK]; - hsize_t block[LDSCT_DS_RANK]; - herr_t ret; /* Generic return value */ - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", - fcnName, mpi_rank, (int)dims[0], (int)dims[1], (int)dims[2], - (int)dims[3], (int)dims[4], checker_edge_size); +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + const char *fcnName = "lower_dim_size_comp_test__select_checker_board():"; +#endif + hbool_t first_selection = TRUE; + int i, j, k, l, m; + int ds_offset; + int sel_offset; + const int test_max_rank = LDSCT_DS_RANK; /* must update code if */ + /* this changes */ + hsize_t base_count; + hsize_t offset_count; + hsize_t start[LDSCT_DS_RANK]; + hsize_t stride[LDSCT_DS_RANK]; + hsize_t count[LDSCT_DS_RANK]; + hsize_t block[LDSCT_DS_RANK]; + herr_t ret; /* Generic return value */ + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size); } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - HDassert( 0 < checker_edge_size ); - HDassert( 0 < sel_rank ); - HDassert( sel_rank <= tgt_rank ); - HDassert( tgt_rank <= test_max_rank ); - HDassert( test_max_rank <= LDSCT_DS_RANK ); + HDassert(0 < checker_edge_size); + HDassert(0 < sel_rank); + HDassert(sel_rank <= tgt_rank); + HDassert(tgt_rank <= test_max_rank); + HDassert(test_max_rank <= LDSCT_DS_RANK); sel_offset = test_max_rank - sel_rank; - HDassert( sel_offset >= 0 ); + HDassert(sel_offset >= 0); ds_offset = test_max_rank - tgt_rank; - HDassert( ds_offset >= 0 ); - HDassert( ds_offset <= sel_offset ); - - HDassert( (hsize_t)checker_edge_size <= dims[sel_offset] ); - HDassert( dims[sel_offset] == 10 ); - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", - fcnName, mpi_rank, sel_rank, sel_offset); 
- HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", - fcnName, mpi_rank, tgt_rank, ds_offset); + HDassert(ds_offset >= 0); + HDassert(ds_offset <= sel_offset); + + HDassert((hsize_t)checker_edge_size <= dims[sel_offset]); + HDassert(dims[sel_offset] == 10); + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset); + HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset); } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ /* First, compute the base count (which assumes start == 0 * for the associated offset) and offset_count (which @@ -1038,264 +975,220 @@ lower_dim_size_comp_test__select_checker_board( * pre-C99 compilers again. */ - base_count = dims[sel_offset] / (checker_edge_size * 2); + base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2); - if ( (dims[sel_rank] % (checker_edge_size * 2)) > 0 ) { + if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) { base_count++; } - offset_count = - (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / - ((hsize_t)(checker_edge_size * 2))); + offset_count = + (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2))); - if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) % - ((hsize_t)(checker_edge_size * 2))) > 0 ) { + if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) { offset_count++; } -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", - fcnName, mpi_rank, base_count, offset_count); +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count, + offset_count); } -#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ +#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ /* Now set up the stride and block arrays, and portions of the start - * and count arrays that will not be altered during the selection of + * and count arrays that will not be altered during the selection of * the checker board. 
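As a worked example of the two counts computed just above (the helper below is local to this illustration, not test code): with an extent of 10 and a checker edge of 3, checkers repeat with period 2 * 3 = 6, so the base series has full checkers at offsets 0 and 6, while the offset series has a full checker at 3 and a partial one at 9 that the later AND selection clips:

#include "hdf5.h"

/* Number of (possibly partial) checkers of size `edge` that start at
 * `offset` and repeat with period 2 * edge within `extent`. */
static hsize_t
checker_count(hsize_t extent, hsize_t edge, hsize_t offset)
{
    hsize_t period = 2 * edge;
    hsize_t span   = extent - offset;

    /* ceil(span / period): one extra, possibly partial, checker when
     * the period does not divide the span evenly */
    return span / period + ((span % period) ? 1 : 0);
}

/* checker_count(10, 3, 0) == 2   (base_count:   checkers at 0 and 6)
 * checker_count(10, 3, 3) == 2   (offset_count: checkers at 3 and 9) */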
*/ i = 0; - while ( i < ds_offset ) { + while (i < ds_offset) { /* these values should never be used */ - start[i] = 0; + start[i] = 0; stride[i] = 0; - count[i] = 0; - block[i] = 0; + count[i] = 0; + block[i] = 0; i++; } - while ( i < sel_offset ) { + while (i < sel_offset) { - start[i] = sel_start[i]; + start[i] = sel_start[i]; stride[i] = 2 * dims[i]; - count[i] = 1; - block[i] = 1; + count[i] = 1; + block[i] = 1; i++; } - while ( i < test_max_rank ) { + while (i < test_max_rank) { stride[i] = (hsize_t)(2 * checker_edge_size); - block[i] = (hsize_t)checker_edge_size; + block[i] = (hsize_t)checker_edge_size; i++; } - + i = 0; do { - if ( 0 >= sel_offset ) { + if (0 >= sel_offset) { - if ( i == 0 ) { + if (i == 0) { start[0] = 0; count[0] = base_count; - - } else { + } + else { start[0] = (hsize_t)checker_edge_size; count[0] = offset_count; - } } j = 0; - do { - if ( 1 >= sel_offset ) { + do { + if (1 >= sel_offset) { - if ( j == 0 ) { + if (j == 0) { start[1] = 0; count[1] = base_count; - - } else { + } + else { start[1] = (hsize_t)checker_edge_size; count[1] = offset_count; - } } k = 0; do { - if ( 2 >= sel_offset ) { + if (2 >= sel_offset) { - if ( k == 0 ) { + if (k == 0) { start[2] = 0; count[2] = base_count; - - } else { + } + else { start[2] = (hsize_t)checker_edge_size; count[2] = offset_count; - } } l = 0; do { - if ( 3 >= sel_offset ) { + if (3 >= sel_offset) { - if ( l == 0 ) { + if (l == 0) { start[3] = 0; count[3] = base_count; - - } else { + } + else { start[3] = (hsize_t)checker_edge_size; count[3] = offset_count; - } } m = 0; do { - if ( 4 >= sel_offset ) { + if (4 >= sel_offset) { - if ( m == 0 ) { + if (m == 0) { start[4] = 0; count[4] = base_count; - - } else { + } + else { start[4] = (hsize_t)checker_edge_size; count[4] = offset_count; - } } - if ( ((i + j + k + l + m) % 2) == 0 ) { - -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == - LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - - HDfprintf(stdout, - "%s%d: *** first_selection = %d ***\n", - fcnName, mpi_rank, (int)first_selection); - HDfprintf(stdout, - "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", - fcnName, mpi_rank, i, j, k, l, m); - HDfprintf(stdout, - "%s:%d: start = %d %d %d %d %d.\n", - fcnName, mpi_rank, - (int)start[0], (int)start[1], - (int)start[2], (int)start[3], - (int)start[4]); - HDfprintf(stdout, - "%s:%d: stride = %d %d %d %d %d.\n", - fcnName, mpi_rank, - (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], - (int)stride[4]); - HDfprintf(stdout, - "%s:%d: count = %d %d %d %d %d.\n", - fcnName, mpi_rank, - (int)count[0], (int)count[1], - (int)count[2], (int)count[3], - (int)count[4]); - HDfprintf(stdout, - "%s:%d: block = %d %d %d %d %d.\n", - fcnName, mpi_rank, - (int)block[0], (int)block[1], - (int)block[2], (int)block[3], - (int)block[4]); - HDfprintf(stdout, - "%s:%d: n-cube extent dims = %d.\n", - fcnName, mpi_rank, - H5Sget_simple_extent_ndims(tgt_sid)); - HDfprintf(stdout, - "%s:%d: selection rank = %d.\n", - fcnName, mpi_rank, sel_rank); + if (((i + j + k + l + m) % 2) == 0) { + +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + + HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank, + (int)first_selection); + HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, + j, k, l, m); + HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)start[0], (int)start[1], (int)start[2], (int)start[3], + (int)start[4]); + 
HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3], + (int)stride[4]); + HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)count[0], (int)count[1], (int)count[2], (int)count[3], + (int)count[4]); + HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank, + (int)block[0], (int)block[1], (int)block[2], (int)block[3], + (int)block[4]); + HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank, + H5Sget_simple_extent_ndims(tgt_sid)); + HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, + sel_rank); } #endif - if ( first_selection ) { + if (first_selection) { - first_selection = FALSE; + first_selection = FALSE; + + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]), + &(stride[ds_offset]), &(count[ds_offset]), + &(block[ds_offset])); - ret = H5Sselect_hyperslab - ( - tgt_sid, - H5S_SELECT_SET, - &(start[ds_offset]), - &(stride[ds_offset]), - &(count[ds_offset]), - &(block[ds_offset]) - ); - VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded"); + } + else { - } else { - - ret = H5Sselect_hyperslab - ( - tgt_sid, - H5S_SELECT_OR, - &(start[ds_offset]), - &(stride[ds_offset]), - &(count[ds_offset]), - &(block[ds_offset]) - ); - - VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]), + &(stride[ds_offset]), &(count[ds_offset]), + &(block[ds_offset])); + VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded"); } } m++; - } while ( ( m <= 1 ) && - ( 4 >= sel_offset ) ); + } while ((m <= 1) && (4 >= sel_offset)); l++; - } while ( ( l <= 1 ) && - ( 3 >= sel_offset ) ); + } while ((l <= 1) && (3 >= sel_offset)); k++; - } while ( ( k <= 1 ) && - ( 2 >= sel_offset ) ); + } while ((k <= 1) && (2 >= sel_offset)); j++; - } while ( ( j <= 1 ) && - ( 1 >= sel_offset ) ); - + } while ((j <= 1) && (1 >= sel_offset)); i++; - } while ( ( i <= 1 ) && - ( 0 >= sel_offset ) ); + } while ((i <= 1) && (0 >= sel_offset)); -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", - fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid)); +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); } #endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ - /* Clip the selection back to the data space proper. */ + /* Clip the selection back to the dataspace proper. 
*/ - for (i = 0; i < test_max_rank; i++) { + for (i = 0; i < test_max_rank; i++) { start[i] = 0; stride[i] = dims[i]; @@ -1303,15 +1196,14 @@ lower_dim_size_comp_test__select_checker_board( block[i] = dims[i]; } - ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, - start, stride, count, block); + ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block); VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded"); -#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", - fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid)); +#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(tgt_sid)); HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank); } #endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */ @@ -1320,345 +1212,316 @@ lower_dim_size_comp_test__select_checker_board( } /* lower_dim_size_comp_test__select_checker_board() */ - /**************************************************************** ** -** lower_dim_size_comp_test__verify_data(): +** lower_dim_size_comp_test__verify_data(): ** -** Examine the supplied buffer to see if it contains the -** expected data. Return TRUE if it does, and FALSE +** Examine the supplied buffer to see if it contains the +** expected data. Return TRUE if it does, and FALSE ** otherwise. ** -** The supplied buffer is presumed to this process's slice -** of the target data set. Each such slice will be an -** n-cube of rank (rank -1) and the supplied edge_size with -** origin (mpi_rank, 0, ... , 0) in the target data set. +** The supplied buffer is presumed to be this process's slice +** of the target data set. Each such slice will be an +** n-cube of rank (rank -1) and the supplied edge_size with +** origin (mpi_rank, 0, ... , 0) in the target data set. ** -** Further, the buffer is presumed to be the result of reading -** or writing a checker board selection of an m (1 <= m < +** Further, the buffer is presumed to be the result of reading +** or writing a checker board selection of an m (1 <= m < ** rank) dimensional slice through this process's slice -** of the target data set. Also, this slice must be parallel -** to the fastest changing indicies. +** of the target data set. Also, this slice must be parallel +** to the fastest changing indices. ** -** It is further presumed that the buffer was zeroed before -** the read/write, and that the full target data set (i.e. -** the buffer/data set for all processes) was initialized -** with the natural numbers listed in order from the origin -** along the fastest changing axis. +** It is further presumed that the buffer was zeroed before +** the read/write, and that the full target data set (i.e. +** the buffer/data set for all processes) was initialized +** with the natural numbers listed in order from the origin +** along the fastest changing axis.
** ** Thus for a 20x10x10 dataset, the value stored in location -** (x, y, z) (assuming that z is the fastest changing index -** and x the slowest) is assumed to be: +** (x, y, z) (assuming that z is the fastest changing index +** and x the slowest) is assumed to be: ** -** (10 * 10 * x) + (10 * y) + z +** (10 * 10 * x) + (10 * y) + z ** -** Further, supposing that this is process 10, this process's -** slice of the dataset would be a 10 x 10 2-cube with origin -** (10, 0, 0) in the data set, and would be initialize (prior -** to the checkerboard selection) as follows: +** Further, supposing that this is process 10, this process's +** slice of the dataset would be a 10 x 10 2-cube with origin +** (10, 0, 0) in the data set, and would be initialized (prior +** to the checkerboard selection) as follows: ** -** 1000, 1001, 1002, ... 1008, 1009 -** 1010, 1011, 1012, ... 1018, 1019 -** . . . . . -** . . . . . -** . . . . . -** 1090, 1091, 1092, ... 1098, 1099 +** 1000, 1001, 1002, ... 1008, 1009 +** 1010, 1011, 1012, ... 1018, 1019 +** . . . . . +** . . . . . +** . . . . . +** 1090, 1091, 1092, ... 1098, 1099 ** -** In the case of a read from the processors slice of another -** data set of different rank, the values expected will have -** to be adjusted accordingly. This is done via the -** first_expected_val parameter. +** In the case of a read from the process's slice of another +** data set of different rank, the values expected will have +** to be adjusted accordingly. This is done via the +** first_expected_val parameter. ** -** Finally, the function presumes that the first element -** of the buffer resides either at the origin of either -** a selected or an unselected checker. (Translation: -** if partial checkers appear in the buffer, they will -** intersect the edges of the n-cube oposite the origin.) +** Finally, the function presumes that the first element +** of the buffer resides at the origin of either +** a selected or an unselected checker. (Translation: +** if partial checkers appear in the buffer, they will +** intersect the edges of the n-cube opposite the origin.)
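The initialization rule spelled out above is simply the C row-major linear offset of a coordinate. A small illustrative helper (hypothetical, not part of the test) makes the (10 * 10 * x) + (10 * y) + z example concrete:

#include <stdint.h>
#include "hdf5.h"

/* Expected initial value at `coord` in a dataset of extent
 * dims[0] x ... x dims[rank - 1], with the last index fastest. */
static uint32_t
expected_value_at(const hsize_t coord[], const hsize_t dims[], int rank)
{
    uint32_t val = 0;
    int      i;

    /* Horner-style accumulation: for a 20 x 10 x 10 dataset this
     * yields (10 * 10 * x) + (10 * y) + z at coordinate (x, y, z) */
    for (i = 0; i < rank; i++)
        val = val * (uint32_t)dims[i] + (uint32_t)coord[i];

    return val;
}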
** ****************************************************************/ #define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0 static hbool_t -lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr, -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG +lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr, +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG const int mpi_rank, #endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - const int rank, - const int edge_size, - const int checker_edge_size, - uint32_t first_expected_val, - hbool_t buf_starts_in_checker) + const int rank, const int edge_size, const int checker_edge_size, + uint32_t first_expected_val, hbool_t buf_starts_in_checker) { #if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - const char * fcnName = - "lower_dim_size_comp_test__verify_data():"; + const char *fcnName = "lower_dim_size_comp_test__verify_data():"; #endif - hbool_t good_data = TRUE; - hbool_t in_checker; - hbool_t start_in_checker[5]; - uint32_t expected_value; - uint32_t * val_ptr; - int i, j, k, l, m; /* to track position in n-cube */ - int v, w, x, y, z; /* to track position in checker */ + hbool_t good_data = TRUE; + hbool_t in_checker; + hbool_t start_in_checker[5]; + uint32_t expected_value; + uint32_t *val_ptr; + int i, j, k, l, m; /* to track position in n-cube */ + int v, w, x, y, z; /* to track position in checker */ const int test_max_rank = 5; /* code changes needed if this is increased */ - HDassert( buf_ptr != NULL ); - HDassert( 0 < rank ); - HDassert( rank <= test_max_rank ); - HDassert( edge_size >= 6 ); - HDassert( 0 < checker_edge_size ); - HDassert( checker_edge_size <= edge_size ); - HDassert( test_max_rank <= LDSCT_DS_RANK ); + HDassert(buf_ptr != NULL); + HDassert(0 < rank); + HDassert(rank <= test_max_rank); + HDassert(edge_size >= 6); + HDassert(0 < checker_edge_size); + HDassert(checker_edge_size <= edge_size); + HDassert(test_max_rank <= LDSCT_DS_RANK); -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank); HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank); HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size); - HDfprintf(stdout, "%s checker_edge_size = %d.\n", - fcnName, checker_edge_size); - HDfprintf(stdout, "%s first_expected_val = %d.\n", - fcnName, (int)first_expected_val); - HDfprintf(stdout, "%s starts_in_checker = %d.\n", - fcnName, (int)buf_starts_in_checker); + HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size); + HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val); + HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker); } #endif - val_ptr = buf_ptr; + val_ptr = buf_ptr; expected_value = first_expected_val; - i = 0; - v = 0; + i = 0; + v = 0; start_in_checker[0] = buf_starts_in_checker; - do - { - if ( v >= checker_edge_size ) { + do { + if (v >= checker_edge_size) { - start_in_checker[0] = ! start_in_checker[0]; - v = 0; + start_in_checker[0] = !start_in_checker[0]; + v = 0; } - j = 0; - w = 0; + j = 0; + w = 0; start_in_checker[1] = start_in_checker[0]; - do - { - if ( w >= checker_edge_size ) { + do { + if (w >= checker_edge_size) { - start_in_checker[1] = ! 
start_in_checker[1]; - w = 0; + start_in_checker[1] = !start_in_checker[1]; + w = 0; } - k = 0; - x = 0; + k = 0; + x = 0; start_in_checker[2] = start_in_checker[1]; - do - { - if ( x >= checker_edge_size ) { + do { + if (x >= checker_edge_size) { - start_in_checker[2] = ! start_in_checker[2]; - x = 0; + start_in_checker[2] = !start_in_checker[2]; + x = 0; } - l = 0; - y = 0; + l = 0; + y = 0; start_in_checker[3] = start_in_checker[2]; - do - { - if ( y >= checker_edge_size ) { + do { + if (y >= checker_edge_size) { - start_in_checker[3] = ! start_in_checker[3]; - y = 0; + start_in_checker[3] = !start_in_checker[3]; + y = 0; } m = 0; z = 0; -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if ( mpi_rank == - LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m); } #endif in_checker = start_in_checker[3]; - do - { -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if ( mpi_rank == - LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { + do { +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, " %d", (int)(*val_ptr)); } #endif - if ( z >= checker_edge_size ) { + if (z >= checker_edge_size) { - in_checker = ! in_checker; - z = 0; + in_checker = !in_checker; + z = 0; } - - if ( in_checker ) { - - if ( *val_ptr != expected_value ) { + + if (in_checker) { + + if (*val_ptr != expected_value) { good_data = FALSE; } - + /* zero out buffer for re-use */ *val_ptr = 0; - - } else if ( *val_ptr != 0 ) { + } + else if (*val_ptr != 0) { good_data = FALSE; - + /* zero out buffer for re-use */ *val_ptr = 0; - } val_ptr++; expected_value++; m++; z++; - - } while ( ( rank >= (test_max_rank - 4) ) && - ( m < edge_size ) ); -#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if ( mpi_rank == - LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { + + } while ((rank >= (test_max_rank - 4)) && (m < edge_size)); +#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, "\n"); } #endif l++; y++; - } while ( ( rank >= (test_max_rank - 3) ) && - ( l < edge_size ) ); + } while ((rank >= (test_max_rank - 3)) && (l < edge_size)); k++; x++; - } while ( ( rank >= (test_max_rank - 2) ) && - ( k < edge_size ) ); + } while ((rank >= (test_max_rank - 2)) && (k < edge_size)); j++; w++; - } while ( ( rank >= (test_max_rank - 1) ) && - ( j < edge_size ) ); + } while ((rank >= (test_max_rank - 1)) && (j < edge_size)); i++; v++; - } while ( ( rank >= test_max_rank ) && - ( i < edge_size ) ); + } while ((rank >= test_max_rank) && (i < edge_size)); - return(good_data); + return (good_data); } /* lower_dim_size_comp_test__verify_data() */ - /*------------------------------------------------------------------------- - * Function: lower_dim_size_comp_test__run_test() - * - * Purpose: Verify that a bug in the computation of the size of the - * lower dimensions of a data space in H5S_obtain_datatype() - * has been corrected. + * Function: lower_dim_size_comp_test__run_test() * - * Return: void + * Purpose: Verify that a bug in the computation of the size of the + * lower dimensions of a dataspace in H5S_obtain_datatype() + * has been corrected. 
* - * Programmer: JRM -- 11/11/09 + * Return: void * - * Modifications: + * Programmer: JRM -- 11/11/09 * *------------------------------------------------------------------------- */ -#define LDSCT_DS_RANK 5 -#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0 +#define LDSCT_DS_RANK 5 static void -lower_dim_size_comp_test__run_test(const int chunk_edge_size, - const hbool_t use_collective_io, +lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io, const hid_t dset_type) { -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - const char *fcnName = "lower_dim_size_comp_test__run_test()"; - int rank; - hsize_t dims[32]; - hsize_t max_dims[32]; +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + const char *fcnName = "lower_dim_size_comp_test__run_test()"; + int rank; + hsize_t dims[32]; + hsize_t max_dims[32]; #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - const char *filename; - hbool_t data_ok = FALSE; - hbool_t mis_match = FALSE; - int i; - int start_index; - int stop_index; - int mrc; - int mpi_rank; - int mpi_size; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist = H5P_DEFAULT; - size_t small_ds_size; - size_t small_ds_slice_size; - size_t large_ds_size; - size_t large_ds_slice_size; - uint32_t expected_value; - uint32_t * small_ds_buf_0 = NULL; - uint32_t * small_ds_buf_1 = NULL; - uint32_t * large_ds_buf_0 = NULL; - uint32_t * large_ds_buf_1 = NULL; - uint32_t * ptr_0; - uint32_t * ptr_1; - hsize_t small_chunk_dims[LDSCT_DS_RANK]; - hsize_t large_chunk_dims[LDSCT_DS_RANK]; - hsize_t small_dims[LDSCT_DS_RANK]; - hsize_t large_dims[LDSCT_DS_RANK]; - hsize_t start[LDSCT_DS_RANK]; - hsize_t stride[LDSCT_DS_RANK]; - hsize_t count[LDSCT_DS_RANK]; - hsize_t block[LDSCT_DS_RANK]; - hsize_t small_sel_start[LDSCT_DS_RANK]; - hsize_t large_sel_start[LDSCT_DS_RANK]; - hid_t full_mem_small_ds_sid; - hid_t full_file_small_ds_sid; - hid_t mem_small_ds_sid; - hid_t file_small_ds_sid; - hid_t full_mem_large_ds_sid; - hid_t full_file_large_ds_sid; - hid_t mem_large_ds_sid; - hid_t file_large_ds_sid; - hid_t small_ds_dcpl_id = H5P_DEFAULT; - hid_t large_ds_dcpl_id = H5P_DEFAULT; - hid_t small_dataset; /* Dataset ID */ - hid_t large_dataset; /* Dataset ID */ - htri_t check; /* Shape comparison return value */ - herr_t ret; /* Generic return value */ + const char *filename; + hbool_t data_ok = FALSE; + hbool_t mis_match = FALSE; + int i; + int start_index; + int stop_index; + int mrc; + int mpi_rank; + int mpi_size; + MPI_Comm mpi_comm = MPI_COMM_NULL; + MPI_Info mpi_info = MPI_INFO_NULL; + hid_t fid; /* HDF5 file ID */ + hid_t acc_tpl; /* File access templates */ + hid_t xfer_plist = H5P_DEFAULT; + size_t small_ds_size; + size_t small_ds_slice_size; + size_t large_ds_size; +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + size_t large_ds_slice_size; +#endif + uint32_t expected_value; + uint32_t *small_ds_buf_0 = NULL; + uint32_t *small_ds_buf_1 = NULL; + uint32_t *large_ds_buf_0 = NULL; + uint32_t *large_ds_buf_1 = NULL; + uint32_t *ptr_0; + uint32_t *ptr_1; + hsize_t small_chunk_dims[LDSCT_DS_RANK]; + hsize_t large_chunk_dims[LDSCT_DS_RANK]; + hsize_t small_dims[LDSCT_DS_RANK]; + hsize_t large_dims[LDSCT_DS_RANK]; + hsize_t start[LDSCT_DS_RANK]; + hsize_t stride[LDSCT_DS_RANK]; + hsize_t count[LDSCT_DS_RANK]; + hsize_t block[LDSCT_DS_RANK]; + hsize_t small_sel_start[LDSCT_DS_RANK]; + hsize_t large_sel_start[LDSCT_DS_RANK]; + hid_t 
full_mem_small_ds_sid; + hid_t full_file_small_ds_sid; + hid_t mem_small_ds_sid; + hid_t file_small_ds_sid; + hid_t full_mem_large_ds_sid; + hid_t full_file_large_ds_sid; + hid_t mem_large_ds_sid; + hid_t file_large_ds_sid; + hid_t small_ds_dcpl_id = H5P_DEFAULT; + hid_t large_ds_dcpl_id = H5P_DEFAULT; + hid_t small_dataset; /* Dataset ID */ + hid_t large_dataset; /* Dataset ID */ + htri_t check; /* Shape comparison return value */ + herr_t ret; /* Generic return value */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - HDassert( mpi_size >= 1 ); + HDassert(mpi_size >= 1); mpi_comm = MPI_COMM_WORLD; mpi_info = MPI_INFO_NULL; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", - fcnName, mpi_rank, (int)chunk_edge_size); - HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", - fcnName, mpi_rank, (int)use_collective_io); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size); + HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - - small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10); - small_ds_slice_size = (size_t) ( 1 * 1 * 10 * 10); + small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10); + small_ds_slice_size = (size_t)(1 * 1 * 10 * 10); large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10); - large_ds_slice_size = (size_t) (10 * 10 * 10 * 10); -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", - fcnName, mpi_rank, +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + large_ds_slice_size = (size_t)(10 * 10 * 10 * 10); + + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank, (int)small_ds_size, (int)small_ds_slice_size); - HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", - fcnName, mpi_rank, + HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank, (int)large_ds_size, (int)large_ds_slice_size); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ @@ -1676,13 +1539,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size); VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded"); - /* initialize the buffers */ ptr_0 = small_ds_buf_0; ptr_1 = small_ds_buf_1; - for ( i = 0; i < (int)small_ds_size; i++ ) { + for (i = 0; i < (int)small_ds_size; i++) { *ptr_0 = (uint32_t)i; *ptr_1 = 0; @@ -1694,7 +1556,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_0 = large_ds_buf_0; ptr_1 = large_ds_buf_1; - for ( i = 0; i < (int)large_ds_size; i++ ) { + for (i = 0; i < (int)large_ds_size; i++) { *ptr_0 = (uint32_t)i; *ptr_1 = 0; @@ -1703,12 +1565,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_1++; } - /* get the file name */ filename = (const char *)GetTestParameters(); - HDassert( filename != NULL ); - + HDassert(filename != NULL); /* ---------------------------------------- * CREATE AN HDF5 FILE WITH PARALLEL ACCESS @@ -1727,11 +1587,10 @@ 
lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Pclose(acc_tpl); VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded"); - /* setup dims: */ small_dims[0] = (hsize_t)(mpi_size + 1); - small_dims[1] = 1; - small_dims[2] = 1; + small_dims[1] = 1; + small_dims[2] = 1; small_dims[3] = 10; small_dims[4] = 10; @@ -1741,52 +1600,40 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, large_dims[3] = 10; large_dims[4] = 10; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)small_dims[0], (int)small_dims[1], - (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]); - HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)large_dims[0], (int)large_dims[1], - (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0], + (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]); + HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0], + (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]); } -#endif +#endif - /* create data spaces */ + /* create dataspaces */ full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((full_mem_small_ds_sid != 0), - "H5Screate_simple() full_mem_small_ds_sid succeeded"); + VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded"); full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((full_file_small_ds_sid != 0), - "H5Screate_simple() full_file_small_ds_sid succeeded"); + VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded"); mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((mem_small_ds_sid != 0), - "H5Screate_simple() mem_small_ds_sid succeeded"); + VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded"); file_small_ds_sid = H5Screate_simple(5, small_dims, NULL); - VRFY((file_small_ds_sid != 0), - "H5Screate_simple() file_small_ds_sid succeeded"); - + VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded"); full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((full_mem_large_ds_sid != 0), - "H5Screate_simple() full_mem_large_ds_sid succeeded"); + VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded"); full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((full_file_large_ds_sid != 0), - "H5Screate_simple() full_file_large_ds_sid succeeded"); + VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded"); mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((mem_large_ds_sid != 0), - "H5Screate_simple() mem_large_ds_sid succeeded"); + VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded"); file_large_ds_sid = H5Screate_simple(5, large_dims, NULL); - VRFY((file_large_ds_sid != 0), - "H5Screate_simple() file_large_ds_sid succeeded"); - + VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded"); /* Select the entire extent of the full small ds dataspaces */ ret = H5Sselect_all(full_mem_small_ds_sid); @@ -1795,7 +1642,6 @@ 
lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Sselect_all(full_file_small_ds_sid); VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded"); - /* Select the entire extent of the full large ds dataspaces */ ret = H5Sselect_all(full_mem_large_ds_sid); VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded"); @@ -1803,25 +1649,23 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Sselect_all(full_file_large_ds_sid); VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded"); - /* if chunk edge size is greater than zero, set up the small and * large data set creation property lists to specify chunked * datasets. */ - if ( chunk_edge_size > 0 ) { + if (chunk_edge_size > 0) { small_chunk_dims[0] = (hsize_t)(1); small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1; small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)small_chunk_dims[0], - (int)small_chunk_dims[1], (int)small_chunk_dims[2], +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, + (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2], (int)small_chunk_dims[3], (int)small_chunk_dims[4]); } -#endif +#endif small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded"); @@ -1833,18 +1677,16 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); large_chunk_dims[0] = (hsize_t)(1); - large_chunk_dims[1] = large_chunk_dims[2] = - large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size; - + large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] = + (hsize_t)chunk_edge_size; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)large_chunk_dims[0], - (int)large_chunk_dims[1], (int)large_chunk_dims[2], +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, + (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2], (int)large_chunk_dims[3], (int)large_chunk_dims[4]); } -#endif +#endif large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded"); @@ -1856,30 +1698,23 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded"); } - /* create the small dataset */ - small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, - file_small_ds_sid, H5P_DEFAULT, + small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT, small_ds_dcpl_id, H5P_DEFAULT); VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded"); - /* create the large dataset */ - large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, - file_large_ds_sid, H5P_DEFAULT, + large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT, large_ds_dcpl_id, 
H5P_DEFAULT); VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded"); -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: small/large ds id = %d / %d.\n", - fcnName, mpi_rank, (int)small_dataset, +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset, (int)large_dataset); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - /* setup xfer property list */ xfer_plist = H5Pcreate(H5P_DATASET_XFER); VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); @@ -1887,14 +1722,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if ( ! use_collective_io ) { + if (!use_collective_io) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, - H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0), "H5Pset_dxpl_mpio_collective_opt() suceeded"); + ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO); + VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded"); } - /* setup selection to write initial data to the small data sets */ start[0] = (hsize_t)(mpi_rank + 1); start[1] = start[2] = start[3] = start[4] = 0; @@ -1908,136 +1741,90 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, block[0] = block[1] = block[2] = 1; block[3] = block[4] = 10; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: settings for small data set initialization.\n", - fcnName, mpi_rank); - HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], (int)stride[4]); - HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ /* setup selections for writing initial data to the small data set */ - ret = H5Sselect_hyperslab(mem_small_ds_sid, - H5S_SELECT_SET, - start, - stride, - count, - block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) suceeded"); - - ret = 
H5Sselect_hyperslab(file_small_ds_sid, - H5S_SELECT_SET, - start, - stride, - count, - block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) suceeded"); - - if ( MAINPROCESS ) { /* add an additional slice to the selections */ + ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded"); + + if (MAINPROCESS) { /* add an additional slice to the selections */ start[0] = 0; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: added settings for main process.\n", - fcnName, mpi_rank); - HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], (int)stride[4]); - HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Sselect_hyperslab(mem_small_ds_sid, - H5S_SELECT_OR, - start, - stride, - count, - block); - VRFY((ret>= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) suceeded"); - - ret = H5Sselect_hyperslab(file_small_ds_sid, - H5S_SELECT_OR, - start, - stride, - count, - block); - VRFY((ret>= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) suceeded"); - } + ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded"); - check = H5Sselect_valid(mem_small_ds_sid); - VRFY((check == TRUE),"H5Sselect_valid(mem_small_ds_sid) returns TRUE"); + ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded"); + } - check = H5Sselect_valid(file_small_ds_sid); - VRFY((check == TRUE),"H5Sselect_valid(file_small_ds_sid) returns TRUE"); + check = H5Sselect_valid(mem_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE"); + check = H5Sselect_valid(file_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE"); /* 
write the initial value of the small data set to file */ -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", - fcnName, mpi_rank); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Dwrite(small_dataset, - dset_type, - mem_small_ds_sid, - file_small_ds_sid, - xfer_plist, - small_ds_buf_0); + ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0); VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded"); - - /* read the small data set back to verify that it contains the - * expected data. Note that each process reads in the entire + /* read the small data set back to verify that it contains the + * expected data. Note that each process reads in the entire * data set and verifies it. */ - ret = H5Dread(small_dataset, - H5T_NATIVE_UINT32, - full_mem_small_ds_sid, - full_file_small_ds_sid, - xfer_plist, + ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist, small_ds_buf_1); VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded"); - /* sync with the other processes before checking data */ mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes"); - + VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes"); /* verify that the correct data was written to the small data set, * and reset the buffer to zero in passing. */ expected_value = 0; - mis_match = FALSE; - ptr_1 = small_ds_buf_1; + mis_match = FALSE; + ptr_1 = small_ds_buf_1; i = 0; - for ( i = 0; i < (int)small_ds_size; i++ ) { + for (i = 0; i < (int)small_ds_size; i++) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } @@ -2047,9 +1834,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_1++; expected_value++; } - VRFY( (mis_match == FALSE), "small ds init data good."); - - + VRFY((mis_match == FALSE), "small ds init data good."); /* setup selections for writing initial data to the large data set */ start[0] = (hsize_t)(mpi_rank + 1); @@ -2063,110 +1848,70 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, block[0] = (hsize_t)1; block[1] = block[2] = block[3] = block[4] = (hsize_t)10; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: settings for large data set initialization.\n", - fcnName, mpi_rank); - HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], (int)stride[4]); - HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: 
settings for large data set initialization.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Sselect_hyperslab(mem_large_ds_sid, - H5S_SELECT_SET, - start, - stride, - count, - block); - VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) suceeded"); - - ret = H5Sselect_hyperslab(file_large_ds_sid, - H5S_SELECT_SET, - start, - stride, - count, - block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) suceeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", - fcnName, mpi_rank, - (int)H5Sget_select_npoints(mem_large_ds_sid)); - HDfprintf(stdout, - "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", - fcnName, mpi_rank, - (int)H5Sget_select_npoints(file_large_ds_sid)); + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded"); + + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(mem_large_ds_sid)); + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(file_large_ds_sid)); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - if ( MAINPROCESS ) { /* add an additional slice to the selections */ + if (MAINPROCESS) { /* add an additional slice to the selections */ start[0] = (hsize_t)0; -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s:%d: added settings for main process.\n", - fcnName, mpi_rank); - HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)start[0], (int)start[1], - (int)start[2], (int)start[3], (int)start[4]); - HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)stride[0], (int)stride[1], - (int)stride[2], (int)stride[3], (int)stride[4]); - HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)count[0], (int)count[1], - (int)count[2], (int)count[3], (int)count[4]); - HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", - fcnName, mpi_rank, (int)block[0], (int)block[1], - (int)block[2], (int)block[3], (int)block[4]); +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: start[] = %d 
%d %d %d %d\n", fcnName, mpi_rank, (int)start[0], + (int)start[1], (int)start[2], (int)start[3], (int)start[4]); + HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0], + (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]); + HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0], + (int)count[1], (int)count[2], (int)count[3], (int)count[4]); + HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0], + (int)block[1], (int)block[2], (int)block[3], (int)block[4]); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Sselect_hyperslab(mem_large_ds_sid, - H5S_SELECT_OR, - start, - stride, - count, - block); - VRFY((ret>= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) suceeded"); - - ret = H5Sselect_hyperslab(file_large_ds_sid, - H5S_SELECT_OR, - start, - stride, - count, - block); - VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) suceeded"); - -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, - "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", - fcnName, mpi_rank, - (int)H5Sget_select_npoints(mem_large_ds_sid)); - HDfprintf(stdout, - "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", - fcnName, mpi_rank, - (int)H5Sget_select_npoints(file_large_ds_sid)); + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded"); + + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded"); + +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(mem_large_ds_sid)); + HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank, + (int)H5Sget_select_npoints(file_large_ds_sid)); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ } - /* try clipping the selection back to the large data space proper */ + /* try clipping the selection back to the large dataspace proper */ start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0; stride[0] = (hsize_t)(2 * (mpi_size + 1)); @@ -2177,93 +1922,70 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, block[0] = (hsize_t)(mpi_size + 1); block[1] = block[2] = block[3] = block[4] = (hsize_t)10; - ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, - start, stride, count, block); - VRFY((ret != FAIL),"H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded"); + ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); + VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded"); - ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, - start, stride, count, block); - VRFY((ret != FAIL),"H5Sselect_hyperslab(file_large_ds_sid, and) succeeded"); + ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block); + VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded"); -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { rank 
= H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims); - HDfprintf(stdout, - "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", - fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1], - (int)dims[2], (int)dims[3], (int)dims[4]); + HDfprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims); - HDfprintf(stdout, - "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", - fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1], - (int)dims[2], (int)dims[3], (int)dims[4]); + HDfprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank, + (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - check = H5Sselect_valid(mem_large_ds_sid); - VRFY((check == TRUE),"H5Sselect_valid(mem_large_ds_sid) returns TRUE"); - - check = H5Sselect_valid(file_large_ds_sid); - VRFY((check == TRUE),"H5Sselect_valid(file_large_ds_sid) returns TRUE"); + check = H5Sselect_valid(mem_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE"); + check = H5Sselect_valid(file_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE"); /* write the initial value of the large data set to file */ -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", - fcnName, mpi_rank); - HDfprintf(stdout, - "%s:%d: large_dataset = %d.\n", - fcnName, mpi_rank, - (int)large_dataset); - HDfprintf(stdout, - "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", - fcnName, mpi_rank, +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank); + HDfprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset); + HDfprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank, (int)mem_large_ds_sid, (int)file_large_ds_sid); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ - ret = H5Dwrite(large_dataset, - dset_type, - mem_large_ds_sid, - file_large_ds_sid, - xfer_plist, - large_ds_buf_0); + ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0); - if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr); + if (ret < 0) + H5Eprint2(H5E_DEFAULT, stderr); VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded"); - /* sync with the other processes before checking data */ mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes"); + VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes"); - /* read the large data set back to verify that it contains the - * expected data. Note that each process reads in the entire + /* read the large data set back to verify that it contains the + * expected data. Note that each process reads in the entire * data set. 
*/ - ret = H5Dread(large_dataset, - H5T_NATIVE_UINT32, - full_mem_large_ds_sid, - full_file_large_ds_sid, - xfer_plist, + ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist, large_ds_buf_1); VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded"); - /* verify that the correct data was written to the large data set. * in passing, reset the buffer to zeros */ expected_value = 0; - mis_match = FALSE; - ptr_1 = large_ds_buf_1; + mis_match = FALSE; + ptr_1 = large_ds_buf_1; i = 0; - for ( i = 0; i < (int)large_ds_size; i++ ) { + for (i = 0; i < (int)large_ds_size; i++) { - if ( *ptr_1 != expected_value ) { + if (*ptr_1 != expected_value) { mis_match = TRUE; } @@ -2273,72 +1995,53 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_1++; expected_value++; } - VRFY( (mis_match == FALSE), "large ds init data good."); + VRFY((mis_match == FALSE), "large ds init data good."); /***********************************/ /***** INITIALIZATION COMPLETE *****/ /***********************************/ - - /* read a checkerboard selection of the process slice of the - * small on disk data set into the process slice of the large + /* read a checkerboard selection of the process slice of the + * small on disk data set into the process slice of the large * in memory data set, and verify the data read. */ small_sel_start[0] = (hsize_t)(mpi_rank + 1); - small_sel_start[1] = small_sel_start[2] = - small_sel_start[3] = small_sel_start[4] = 0; - - lower_dim_size_comp_test__select_checker_board(mpi_rank, - file_small_ds_sid, - /* tgt_rank = */ 5, - small_dims, - /* checker_edge_size = */ 3, - /* sel_rank */ 2, - small_sel_start); - - expected_value = (uint32_t) - ((small_sel_start[0] * small_dims[1] * small_dims[2] * - small_dims[3] * small_dims[4]) + - (small_sel_start[1] * small_dims[2] * small_dims[3] * - small_dims[4]) + - (small_sel_start[2] * small_dims[3] * small_dims[4]) + - (small_sel_start[3] * small_dims[4]) + - (small_sel_start[4])); + small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; + + lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid, + /* tgt_rank = */ 5, small_dims, + /* checker_edge_size = */ 3, + /* sel_rank */ 2, small_sel_start); + expected_value = + (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[2] * small_dims[3] * small_dims[4]) + + (small_sel_start[3] * small_dims[4]) + (small_sel_start[4])); large_sel_start[0] = (hsize_t)(mpi_rank + 1); large_sel_start[1] = 5; large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; - lower_dim_size_comp_test__select_checker_board(mpi_rank, - mem_large_ds_sid, - /* tgt_rank = */ 5, - large_dims, - /* checker_edge_size = */ 3, - /* sel_rank = */ 2, - large_sel_start); + lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid, + /* tgt_rank = */ 5, large_dims, + /* checker_edge_size = */ 3, + /* sel_rank = */ 2, large_sel_start); - - /* verify that H5S_select_shape_same() reports the two + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. 
*/ - check = H5S_select_shape_same_test(mem_large_ds_sid, - file_small_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed (1)"); - + check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed (1)"); - ret = H5Dread(small_dataset, - H5T_NATIVE_UINT32, - mem_large_ds_sid, - file_small_ds_sid, - xfer_plist, + ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist, large_ds_buf_1); VRFY((ret >= 0), "H5Sread() slice from small ds succeeded."); -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ @@ -2347,28 +2050,25 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, data_ok = TRUE; - start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * - large_dims[3] * large_dims[4]) + - (large_sel_start[1] * large_dims[2] * large_dims[3] * - large_dims[4]) + + start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + (large_sel_start[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[3] * large_dims[4]) + - (large_sel_start[4])); + (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); - stop_index = start_index + (int)small_ds_slice_size; + stop_index = start_index + (int)small_ds_slice_size; - HDassert( 0 <= start_index ); - HDassert( start_index < stop_index ); - HDassert( stop_index <= (int)large_ds_size ); + HDassert(0 <= start_index); + HDassert(start_index < stop_index); + HDassert(stop_index <= (int)large_ds_size); ptr_1 = large_ds_buf_1; - for ( i = 0; i < start_index; i++ ) { + for (i = 0; i < start_index; i++) { - if ( *ptr_1 != (uint32_t)0 ) { + if (*ptr_1 != (uint32_t)0) { data_ok = FALSE; - *ptr_1 = (uint32_t)0; + *ptr_1 = (uint32_t)0; } ptr_1++; @@ -2376,16 +2076,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((data_ok == TRUE), "slice read from small ds data good(1)."); - data_ok = lower_dim_size_comp_test__verify_data(ptr_1, #if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - mpi_rank, + mpi_rank, #endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ - /* rank */ 2, - /* edge_size */ 10, - /* checker_edge_size */ 3, - expected_value, - /* buf_starts_in_checker */ TRUE); + /* rank */ 2, + /* edge_size */ 10, + /* checker_edge_size */ 3, expected_value, + /* buf_starts_in_checker */ TRUE); VRFY((data_ok == TRUE), "slice read from small ds data good(2)."); @@ -2393,13 +2091,12 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_1 += small_ds_slice_size; + for (i = stop_index; i < (int)large_ds_size; i++) { - for ( i = stop_index; i < (int)large_ds_size; i++ ) { - - if ( *ptr_1 != (uint32_t)0 ) { + if (*ptr_1 != (uint32_t)0) { data_ok = FALSE; - *ptr_1 = (uint32_t)0; + *ptr_1 = (uint32_t)0; } ptr_1++; @@ -2407,59 +2104,41 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((data_ok == TRUE), "slice read from small ds data good(3)."); - - - - - /* read a checkerboard selection of a slice of the process slice of - * the large on disk data set into the process slice of the small + /* read a checkerboard selection of a slice of the process slice of + * the large 
on disk data set into the process slice of the small * in memory data set, and verify the data read. */ small_sel_start[0] = (hsize_t)(mpi_rank + 1); - small_sel_start[1] = small_sel_start[2] = - small_sel_start[3] = small_sel_start[4] = 0; + small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0; - lower_dim_size_comp_test__select_checker_board(mpi_rank, - mem_small_ds_sid, - /* tgt_rank = */ 5, - small_dims, - /* checker_edge_size = */ 3, - /* sel_rank */ 2, - small_sel_start); + lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid, + /* tgt_rank = */ 5, small_dims, + /* checker_edge_size = */ 3, + /* sel_rank */ 2, small_sel_start); large_sel_start[0] = (hsize_t)(mpi_rank + 1); large_sel_start[1] = 5; large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0; - lower_dim_size_comp_test__select_checker_board(mpi_rank, - file_large_ds_sid, - /* tgt_rank = */ 5, - large_dims, - /* checker_edge_size = */ 3, - /* sel_rank = */ 2, - large_sel_start); + lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid, + /* tgt_rank = */ 5, large_dims, + /* checker_edge_size = */ 3, + /* sel_rank = */ 2, large_sel_start); - - /* verify that H5S_select_shape_same() reports the two + /* verify that H5Sselect_shape_same() reports the two * selections as having the same shape. */ - check = H5S_select_shape_same_test(mem_small_ds_sid, - file_large_ds_sid); - VRFY((check == TRUE), "H5S_select_shape_same_test passed (2)"); - + check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid); + VRFY((check == TRUE), "H5Sselect_shape_same passed (2)"); - ret = H5Dread(large_dataset, - H5T_NATIVE_UINT32, - mem_small_ds_sid, - file_large_ds_sid, - xfer_plist, + ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist, small_ds_buf_1); VRFY((ret >= 0), "H5Sread() slice from large ds succeeded."); -#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { +#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank); } #endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */ @@ -2468,31 +2147,28 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, data_ok = TRUE; - expected_value = (uint32_t) - ((large_sel_start[0] * large_dims[1] * large_dims[2] * - large_dims[3] * large_dims[4]) + - (large_sel_start[1] * large_dims[2] * large_dims[3] * - large_dims[4]) + - (large_sel_start[2] * large_dims[3] * large_dims[4]) + - (large_sel_start[3] * large_dims[4]) + - (large_sel_start[4])); + expected_value = + (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[2] * large_dims[3] * large_dims[4]) + + (large_sel_start[3] * large_dims[4]) + (large_sel_start[4])); start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size; - stop_index = start_index + (int)small_ds_slice_size; + stop_index = start_index + (int)small_ds_slice_size; - HDassert( 0 <= start_index ); - HDassert( start_index < stop_index ); - HDassert( stop_index <= (int)small_ds_size ); + HDassert(0 <= start_index); + HDassert(start_index < stop_index); + HDassert(stop_index <= (int)small_ds_size); ptr_1 = small_ds_buf_1; - for ( i = 0; i < start_index; i++ ) { + for (i = 0; i < start_index; i++) { - if ( *ptr_1 != (uint32_t)0 ) { + if 
(*ptr_1 != (uint32_t)0) { data_ok = FALSE; - *ptr_1 = (uint32_t)0; + *ptr_1 = (uint32_t)0; } ptr_1++; @@ -2500,15 +2176,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((data_ok == TRUE), "slice read from large ds data good(1)."); - data_ok = lower_dim_size_comp_test__verify_data(ptr_1, #if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG mpi_rank, #endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ /* rank */ 2, /* edge_size */ 10, - /* checker_edge_size */ 3, - expected_value, + /* checker_edge_size */ 3, expected_value, /* buf_starts_in_checker */ TRUE); VRFY((data_ok == TRUE), "slice read from large ds data good(2)."); @@ -2517,20 +2191,19 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ptr_1 += small_ds_slice_size; + for (i = stop_index; i < (int)small_ds_size; i++) { - for ( i = stop_index; i < (int)small_ds_size; i++ ) { - - if ( *ptr_1 != (uint32_t)0 ) { + if (*ptr_1 != (uint32_t)0) { #if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG - if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) { - HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", - fcnName, mpi_rank, (int)i, (int)(*ptr_1)); + if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) { + HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i, + (int)(*ptr_1)); } #endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */ data_ok = FALSE; - *ptr_1 = (uint32_t)0; + *ptr_1 = (uint32_t)0; } ptr_1++; @@ -2538,7 +2211,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, VRFY((data_ok == TRUE), "slice read from large ds data good(3)."); - /* Close dataspaces */ ret = H5Sclose(full_mem_small_ds_sid); VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded"); @@ -2552,7 +2224,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Sclose(file_small_ds_sid); VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded"); - ret = H5Sclose(full_mem_large_ds_sid); VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded"); @@ -2565,7 +2236,6 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Sclose(file_large_ds_sid); VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded"); - /* Close Datasets */ ret = H5Dclose(small_dataset); VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded"); @@ -2573,36 +2243,36 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size, ret = H5Dclose(large_dataset); VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded"); - /* close the file collectively */ MESG("about to close file."); ret = H5Fclose(fid); VRFY((ret != FAIL), "file close succeeded"); /* Free memory buffers */ - if ( small_ds_buf_0 != NULL ) HDfree(small_ds_buf_0); - if ( small_ds_buf_1 != NULL ) HDfree(small_ds_buf_1); + if (small_ds_buf_0 != NULL) + HDfree(small_ds_buf_0); + if (small_ds_buf_1 != NULL) + HDfree(small_ds_buf_1); - if ( large_ds_buf_0 != NULL ) HDfree(large_ds_buf_0); - if ( large_ds_buf_1 != NULL ) HDfree(large_ds_buf_1); + if (large_ds_buf_0 != NULL) + HDfree(large_ds_buf_0); + if (large_ds_buf_1 != NULL) + HDfree(large_ds_buf_1); return; } /* lower_dim_size_comp_test__run_test() */ - /*------------------------------------------------------------------------- - * Function: lower_dim_size_comp_test() - * - * Purpose: Test to see if an error in the computation of the size - * of the lower dimensions in H5S_obtain_datatype() has - * been corrected. 
+ * Function: lower_dim_size_comp_test() * - * Return: void + * Purpose: Test to see if an error in the computation of the size + * of the lower dimensions in H5S_obtain_datatype() has + * been corrected. * - * Programmer: JRM -- 11/11/09 + * Return: void * - * Modifications: + * Programmer: JRM -- 11/11/09 * *------------------------------------------------------------------------- */ @@ -2611,107 +2281,92 @@ void lower_dim_size_comp_test(void) { /* const char *fcnName = "lower_dim_size_comp_test()"; */ - int chunk_edge_size = 0; - int use_collective_io = 1; - hid_t dset_type = H5T_NATIVE_UINT; -#if 0 - sleep(60); -#endif - HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); - for ( use_collective_io = (hbool_t)0; - (int)use_collective_io <= 1; - (hbool_t)(use_collective_io++) ) { + int chunk_edge_size = 0; + int use_collective_io; + HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned)); + for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) { chunk_edge_size = 0; - lower_dim_size_comp_test__run_test(chunk_edge_size, - (hbool_t)use_collective_io, - dset_type); - + lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT); chunk_edge_size = 5; - lower_dim_size_comp_test__run_test(chunk_edge_size, - (hbool_t)use_collective_io, - dset_type); - } + lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT); + } /* end for */ return; - } /* lower_dim_size_comp_test() */ - /*------------------------------------------------------------------------- - * Function: link_chunk_collective_io_test() + * Function: link_chunk_collective_io_test() * - * Purpose: Test to verify that an error in MPI type management in - * H5D_link_chunk_collective_io() has been corrected. - * In this bug, we used to free MPI types regardless of - * whether they were basic or derived. + * Purpose: Test to verify that an error in MPI type management in + * H5D_link_chunk_collective_io() has been corrected. + * In this bug, we used to free MPI types regardless of + * whether they were basic or derived (see the sketch below). * - * This test is based on a bug report kindly provided by - * Rob Latham of the MPICH team and ANL. + * This test is based on a bug report kindly provided by + * Rob Latham of the MPICH team and ANL. * - * The basic thrust of the test is to cause a process - * to participate in a collective I/O in which it: + * The basic thrust of the test is to cause a process + * to participate in a collective I/O in which it: * - * 1) Reads or writes exactly one chunk, + * 1) Reads or writes exactly one chunk, * - * 2) Has no in memory buffer for any other chunk. + * 2) Has no in-memory buffer for any other chunk. * - * The test differers from Rob Latham's bug report in - * that is runs with an arbitrary number of proceeses, - * and uses a 1 dimensional dataset. + * The test differs from Rob Latham's bug report in + * that it runs with an arbitrary number of processes, + * and uses a one-dimensional dataset.
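+ * + * A minimal sketch of the guard at issue (variable names are + * illustrative, not taken from the library): build and commit a + * derived type only when one is needed, remember that it was + * built, and free only in that case, since the MPI standard + * makes it erroneous to free a predefined (basic) type such as + * MPI_DOUBLE: + * + * MPI_Datatype type = MPI_DOUBLE; + * hbool_t type_is_derived = FALSE; + * if (count > 1) { + * MPI_Type_contiguous(count, MPI_DOUBLE, &type); + * MPI_Type_commit(&type); + * type_is_derived = TRUE; + * } + * (use type in the collective transfer) + * if (type_is_derived) + * MPI_Type_free(&type);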
* - * Return: void + * Return: void * - * Programmer: JRM -- 12/16/09 - * - * Modifications: + * Programmer: JRM -- 12/16/09 * *------------------------------------------------------------------------- */ -#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16 +#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16 void link_chunk_collective_io_test(void) { /* const char *fcnName = "link_chunk_collective_io_test()"; */ const char *filename; - hbool_t mis_match = FALSE; - int i; - int mrc; - int mpi_rank; - int mpi_size; - MPI_Comm mpi_comm = MPI_COMM_WORLD; - MPI_Info mpi_info = MPI_INFO_NULL; - hsize_t count[1] = {1}; - hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - hsize_t start[1]; - hsize_t dims[1]; - hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; - herr_t ret; /* Generic return value */ - hid_t file_id; - hid_t acc_tpl; - hid_t dset_id; - hid_t file_ds_sid; - hid_t write_mem_ds_sid; - hid_t read_mem_ds_sid; - hid_t ds_dcpl_id; - hid_t xfer_plist; - double diff; - double expected_value; - double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; - double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; + hbool_t mis_match = FALSE; + int i; + int mrc; + int mpi_rank; + int mpi_size; + MPI_Comm mpi_comm = MPI_COMM_WORLD; + MPI_Info mpi_info = MPI_INFO_NULL; + hsize_t count[1] = {1}; + hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + hsize_t start[1]; + hsize_t dims[1]; + hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE}; + herr_t ret; /* Generic return value */ + hid_t file_id; + hid_t acc_tpl; + hid_t dset_id; + hid_t file_ds_sid; + hid_t write_mem_ds_sid; + hid_t read_mem_ds_sid; + hid_t ds_dcpl_id; + hid_t xfer_plist; + double diff; + double expected_value; + double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; + double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE]; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - HDassert( mpi_size > 0 ); + HDassert(mpi_size > 0); /* get the file name */ filename = (const char *)GetTestParameters(); - HDassert( filename != NULL ); + HDassert(filename != NULL); /* setup file access template */ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type); @@ -2730,18 +2385,15 @@ link_chunk_collective_io_test(void) /* setup dims */ dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE)); - /* setup mem and file data spaces */ + /* setup mem and file dataspaces */ write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); - VRFY((write_mem_ds_sid != 0), - "H5Screate_simple() write_mem_ds_sid succeeded"); + VRFY((write_mem_ds_sid != 0), "H5Screate_simple() write_mem_ds_sid succeeded"); read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL); - VRFY((read_mem_ds_sid != 0), - "H5Screate_simple() read_mem_ds_sid succeeded"); + VRFY((read_mem_ds_sid != 0), "H5Screate_simple() read_mem_ds_sid succeeded"); file_ds_sid = H5Screate_simple(1, dims, NULL); - VRFY((file_ds_sid != 0), - "H5Screate_simple() file_ds_sid succeeded"); + VRFY((file_ds_sid != 0), "H5Screate_simple() file_ds_sid succeeded"); /* setup data set creation property list */ ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE); @@ -2754,9 +2406,8 @@ link_chunk_collective_io_test(void) VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded"); /* create the data set */ - dset_id = 
H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, - file_ds_sid, H5P_DEFAULT, - ds_dcpl_id, H5P_DEFAULT); + dset_id = + H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT); VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded"); /* close the dataset creation property list */ @@ -2764,24 +2415,18 @@ link_chunk_collective_io_test(void) VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded"); /* setup local data */ - expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * - (double)(mpi_rank); - for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) { + expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank); + for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { local_data_written[i] = expected_value; - local_data_read[i] = 0.0; - expected_value += 1.0; + local_data_read[i] = 0.0; + expected_value += 1.0; } /* select the file and mem spaces */ start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE); - ret = H5Sselect_hyperslab(file_ds_sid, - H5S_SELECT_SET, - start, - stride, - count, - block); - VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) suceeded"); + ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block); + VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded"); ret = H5Sselect_all(write_mem_ds_sid); VRFY((ret != FAIL), "H5Sselect_all(mem_ds_sid) succeeded"); @@ -2796,26 +2441,16 @@ link_chunk_collective_io_test(void) VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); /* write the data set */ - ret = H5Dwrite(dset_id, - H5T_NATIVE_DOUBLE, - write_mem_ds_sid, - file_ds_sid, - xfer_plist, - local_data_written); + ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written); VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded"); - + /* sync with the other processes before checking data */ mrc = MPI_Barrier(MPI_COMM_WORLD); - VRFY((mrc==MPI_SUCCESS), "Sync after dataset write"); + VRFY((mrc == MPI_SUCCESS), "Sync after dataset write"); /* read this processes slice of the dataset back in */ - ret = H5Dread(dset_id, - H5T_NATIVE_DOUBLE, - read_mem_ds_sid, - file_ds_sid, - xfer_plist, - local_data_read); + ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read); VRFY((ret >= 0), "H5Dread() dataset read succeeded"); /* close the xfer property list */ @@ -2824,17 +2459,17 @@ link_chunk_collective_io_test(void) /* verify the data */ mis_match = FALSE; - for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) { + for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) { diff = local_data_written[i] - local_data_read[i]; diff = fabs(diff); - if ( diff >= 0.001 ) { + if (diff >= 0.001) { mis_match = TRUE; - } + } } - VRFY( (mis_match == FALSE), "dataset data good."); + VRFY((mis_match == FALSE), "dataset data good."); /* Close dataspaces */ ret = H5Sclose(write_mem_ds_sid); @@ -2857,4 +2492,3 @@ link_chunk_collective_io_test(void) return; } /* link_chunk_collective_io_test() */ - diff --git a/testpar/t_subfiling_vfd.c b/testpar/t_subfiling_vfd.c new file mode 100644 index 0000000..b65aef0 --- /dev/null +++ b/testpar/t_subfiling_vfd.c @@ -0,0 +1,2037 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * HDF5 Subfiling VFD tests + * + * NOTE: these tests currently assume that the default I/O concentrator + * selection strategy for the Subfiling VFD is to use 1 I/O + * concentrator per node. If that changes in the future, some of + * these tests will need updating. + */ + +#include <mpi.h> + +#include "testpar.h" +#include "H5srcdir.h" +#include "H5MMprivate.h" + +#ifdef H5_HAVE_SUBFILING_VFD + +#include "H5FDsubfiling.h" +#include "H5FDioc.h" + +#define SUBFILING_TEST_DIR H5FD_SUBFILING_NAME + +/* The smallest Subfiling stripe size used for testing */ +#define SUBFILING_MIN_STRIPE_SIZE 128 + +#ifndef PATH_MAX +#define PATH_MAX 4096 +#endif + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) + +#define CHECK_PASSED() \ + do { \ + int err_result = (nerrors > curr_nerrors); \ + \ + mpi_code_g = MPI_Allreduce(MPI_IN_PLACE, &err_result, 1, MPI_INT, MPI_MAX, comm_g); \ + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Allreduce succeeded"); \ + \ + if (MAINPROCESS) { \ + if (err_result == 0) \ + PASSED(); \ + else \ + H5_FAILED(); \ + } \ + } while (0) + +static MPI_Comm comm_g = MPI_COMM_WORLD; +static MPI_Info info_g = MPI_INFO_NULL; +static int mpi_rank; +static int mpi_size; +static int mpi_code_g; +static int num_nodes_g; +static int num_iocs_g; + +static MPI_Comm node_local_comm = MPI_COMM_WORLD; +static int node_local_rank; +static int node_local_size; + +static MPI_Comm ioc_comm = MPI_COMM_WORLD; +static int ioc_comm_rank; +static int ioc_comm_size; + +static long long stripe_size_g = -1; +static long ioc_per_node_g = -1; +static int ioc_thread_pool_size_g = -1; + +int nerrors = 0; +int curr_nerrors = 0; + +/* Function pointer typedef for test functions */ +typedef void (*test_func)(void); + +/* Utility functions */ +static hid_t create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, hbool_t custom_config, + H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size); + +/* Test functions */ +static void test_create_and_close(void); +static void test_config_file(void); +static void test_stripe_sizes(void); +static void test_read_different_stripe_size(void); +static void test_subfiling_precreate_rank_0(void); +static void test_subfiling_write_many_read_one(void); +static void test_subfiling_write_many_read_few(void); +static void test_subfiling_h5fuse(void); + +static test_func tests[] = { + test_create_and_close, + test_config_file, + test_stripe_sizes, + test_read_different_stripe_size, + test_subfiling_precreate_rank_0, + test_subfiling_write_many_read_one, + test_subfiling_write_many_read_few, + test_subfiling_h5fuse, +}; + +/* --------------------------------------------------------------------------- + * Function: create_subfiling_ioc_fapl + * + * Purpose: Create and populate a subfiling FAPL ID. + * + * Return: Success: HID of the top-level (subfiling) FAPL, a non-negative + * value. + * Failure: H5I_INVALID_HID, a negative value. 
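+ * + * Note: the returned FAPL is a stack -- the Subfiling VFD's ioc_fapl_id underneath it is set to either the IOC VFD or the sec2 VFD, depending on require_ioc. A typical call from the tests below looks like (sketch only): + * + * fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + * file_id = H5Fcreate("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);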
+ * --------------------------------------------------------------------------- + */ +static hid_t +create_subfiling_ioc_fapl(MPI_Comm comm, MPI_Info info, hbool_t custom_config, + H5FD_subfiling_params_t *custom_cfg, int32_t thread_pool_size) +{ + H5FD_subfiling_config_t subfiling_conf; + H5FD_ioc_config_t ioc_conf; + hid_t ret_value = H5I_INVALID_HID; + + HDassert(!custom_config || custom_cfg); + + if ((ret_value = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR; + + if (H5Pset_mpi_params(ret_value, comm, info) < 0) + TEST_ERROR; + + if (!custom_config) { + if (H5Pset_fapl_subfiling(ret_value, NULL) < 0) + TEST_ERROR; + } + else { + /* Get defaults for Subfiling configuration */ + if (H5Pget_fapl_subfiling(ret_value, &subfiling_conf) < 0) + TEST_ERROR; + + /* Set custom configuration */ + subfiling_conf.shared_cfg = *custom_cfg; + + if (subfiling_conf.require_ioc) { + /* Get IOC VFD defaults */ + if (H5Pget_fapl_ioc(ret_value, &ioc_conf) < 0) + TEST_ERROR; + + /* Set custom configuration */ + ioc_conf.thread_pool_size = thread_pool_size; + + if (H5Pset_fapl_ioc(subfiling_conf.ioc_fapl_id, &ioc_conf) < 0) + TEST_ERROR; + } + else { + if (H5Pset_fapl_sec2(subfiling_conf.ioc_fapl_id) < 0) + TEST_ERROR; + } + + if (H5Pset_fapl_subfiling(ret_value, &subfiling_conf) < 0) + TEST_ERROR; + } + + return ret_value; + +error: + if ((H5I_INVALID_HID != ret_value) && (H5Pclose(ret_value) < 0)) { + H5_FAILED(); + AT(); + } + + return H5I_INVALID_HID; +} + +/* + * A simple test that creates and closes a file with the + * subfiling VFD + */ +#define SUBF_FILENAME "test_subfiling_basic_create.h5" +static void +test_create_and_close(void) +{ + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("file creation and immediate close"); + + /* Get a default Subfiling FAPL */ + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME + +/* + * Test to check that Subfiling configuration file matches + * what is expected for a given configuration + */ +#define SUBF_FILENAME "test_subfiling_config_file.h5" +static void +test_config_file(void) +{ + H5FD_subfiling_params_t cfg; + int64_t stripe_size; + int64_t read_stripe_size; + FILE *config_file; + char *config_filename = NULL; + char *config_buf = NULL; + long config_file_len; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + int read_stripe_count; + int read_aggr_count; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("subfiling configuration file format"); + + /* + * Choose a random Subfiling stripe size between + * the smallest allowed value and 32MiB + */ + if (mpi_rank == 0) { + stripe_size = (rand() % (H5FD_SUBFILING_DEFAULT_STRIPE_SIZE - SUBFILING_MIN_STRIPE_SIZE + 1)) + + SUBFILING_MIN_STRIPE_SIZE; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&stripe_size, 1, MPI_INT64_T, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + + cfg.ioc_selection = SELECT_IOC_ONE_PER_NODE; + cfg.stripe_size = (stripe_size_g > 0) ? stripe_size_g : stripe_size; + cfg.stripe_count = num_iocs_g > 1 ? 
(num_iocs_g / 2) : 1; + + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + if (MAINPROCESS) { + h5_stat_t file_info; + char *resolved_path; + char *subfile_dir; + char *subfile_name; + char *tmp_buf; + char *substr; + char scan_format[256]; + int num_digits; + + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + config_filename = HDmalloc(PATH_MAX); + VRFY(config_filename, "HDmalloc succeeded"); + + HDsnprintf(config_filename, PATH_MAX, H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino); + + config_file = HDfopen(config_filename, "r"); + VRFY(config_file, "HDfopen succeeded"); + + HDfree(config_filename); + + VRFY((HDfseek(config_file, 0, SEEK_END) >= 0), "HDfseek succeeded"); + + config_file_len = HDftell(config_file); + VRFY((config_file_len > 0), "HDftell succeeded"); + + VRFY((HDfseek(config_file, 0, SEEK_SET) >= 0), "HDfseek succeeded"); + + config_buf = HDmalloc((size_t)config_file_len + 1); + VRFY(config_buf, "HDmalloc succeeded"); + + VRFY((HDfread(config_buf, (size_t)config_file_len, 1, config_file) == 1), "HDfread succeeded"); + config_buf[config_file_len] = '\0'; + + /* Check the stripe_size field in the configuration file */ + substr = HDstrstr(config_buf, "stripe_size"); + VRFY(substr, "HDstrstr succeeded"); + + VRFY((HDsscanf(substr, "stripe_size=%" PRId64, &read_stripe_size) == 1), "HDsscanf succeeded"); + VRFY((read_stripe_size == cfg.stripe_size), "Stripe size comparison succeeded"); + + /* Check the aggregator_count field in the configuration file */ + substr = HDstrstr(config_buf, "aggregator_count"); + VRFY(substr, "HDstrstr succeeded"); + + VRFY((HDsscanf(substr, "aggregator_count=%d", &read_aggr_count) == 1), "HDsscanf succeeded"); + if (cfg.stripe_count < num_iocs_g) + VRFY((read_aggr_count == cfg.stripe_count), "Aggregator count comparison succeeded"); + else + VRFY((read_aggr_count == num_iocs_g), "Aggregator count comparison succeeded"); + + /* Check the subfile_count field in the configuration file */ + substr = HDstrstr(config_buf, "subfile_count"); + VRFY(substr, "HDstrstr succeeded"); + + VRFY((HDsscanf(substr, "subfile_count=%d", &read_stripe_count) == 1), "HDsscanf succeeded"); + VRFY((read_stripe_count == cfg.stripe_count), "Stripe count comparison succeeded"); + + /* Check the hdf5_file and subfile_dir fields in the configuration file */ + resolved_path = HDrealpath(SUBF_FILENAME, NULL); + VRFY(resolved_path, "HDrealpath succeeded"); + + VRFY((H5_dirname(resolved_path, &subfile_dir) >= 0), "H5_dirname succeeded"); + + tmp_buf = HDmalloc(PATH_MAX); + VRFY(tmp_buf, "HDmalloc succeeded"); + + substr = HDstrstr(config_buf, "hdf5_file"); + VRFY(substr, "HDstrstr succeeded"); + + HDsnprintf(scan_format, sizeof(scan_format), "hdf5_file=%%%zus", (size_t)(PATH_MAX - 1)); + VRFY((HDsscanf(substr, scan_format, tmp_buf) == 1), "HDsscanf succeeded"); + + VRFY((HDstrcmp(tmp_buf, resolved_path) == 0), "HDstrcmp succeeded"); + + substr = HDstrstr(config_buf, "subfile_dir"); + VRFY(substr, "HDstrstr succeeded"); + + HDsnprintf(scan_format, sizeof(scan_format), "subfile_dir=%%%zus", (size_t)(PATH_MAX - 1)); + VRFY((HDsscanf(substr, scan_format, tmp_buf) == 1), "HDsscanf succeeded"); + + VRFY((HDstrcmp(tmp_buf, subfile_dir) == 0), 
"HDstrcmp succeeded"); + + HDfree(tmp_buf); + H5MM_free(subfile_dir); + HDfree(resolved_path); + + subfile_name = HDmalloc(PATH_MAX); + VRFY(subfile_name, "HDmalloc succeeded"); + + /* Verify the name of each subfile is in the configuration file */ + num_digits = (int)(HDlog10(cfg.stripe_count) + 1); + for (size_t i = 0; i < (size_t)cfg.stripe_count; i++) { + HDsnprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, (int)i + 1, cfg.stripe_count); + + substr = HDstrstr(config_buf, subfile_name); + VRFY(substr, "HDstrstr succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(subfile_name, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, (int)cfg.stripe_count + 1, cfg.stripe_count); + substr = HDstrstr(config_buf, subfile_name); + VRFY(substr == NULL, "HDstrstr correctly failed"); + + HDfree(subfile_name); + HDfree(config_buf); + + VRFY((HDfclose(config_file) >= 0), "HDfclose on configuration file succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME + +/* + * Test a few different Subfiling stripe sizes with a fixed + * stripe count + */ +/* TODO: Test collective I/O as well when support is implemented */ +#define SUBF_FILENAME "test_subfiling_stripe_sizes.h5" +#define SUBF_NITER 10 +static void +test_stripe_sizes(void) +{ + H5FD_t *file_ptr = NULL; + void *write_buf = NULL; + char *tmp_filename = NULL; + hid_t dxpl_id = H5I_INVALID_HID; + int num_subfiles; + int num_digits; + hid_t fapl_id = H5I_INVALID_HID; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("random subfiling stripe sizes"); + + tmp_filename = HDmalloc(PATH_MAX); + VRFY(tmp_filename, "HDmalloc succeeded"); + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + VRFY((dxpl_id >= 0), "DCPL creation succeeded"); + + for (size_t i = 0; i < SUBF_NITER; i++) { + H5FD_subfiling_params_t cfg; + h5_stat_size_t file_size; + const void *c_write_buf; + h5_stat_t file_info; + int64_t file_size64; + int64_t stripe_size; + haddr_t file_end_addr; + haddr_t write_addr; + size_t nbytes; + herr_t write_status; + hid_t file_id; + + /* + * Choose a random Subfiling stripe size between + * the smallest allowed value and the default value + */ + if (mpi_rank == 0) { + stripe_size = (rand() % (H5FD_SUBFILING_DEFAULT_STRIPE_SIZE - SUBFILING_MIN_STRIPE_SIZE + 1)) + + SUBFILING_MIN_STRIPE_SIZE; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&stripe_size, 1, MPI_INT64_T, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + + cfg.ioc_selection = SELECT_IOC_ONE_PER_NODE; + cfg.stripe_size = (stripe_size_g > 0) ? 
stripe_size_g : stripe_size; + cfg.stripe_count = 1; + + /* First, try I/O with a single rank */ + if (MAINPROCESS) { + FILE *subfile_ptr; + + num_subfiles = 1; + num_digits = (int)(HDlog10(num_subfiles) + 1); + + nbytes = (size_t)(cfg.stripe_size * num_subfiles); + + write_buf = HDmalloc(nbytes); + VRFY(write_buf, "HDmalloc succeeded"); + + HDmemset(write_buf, 255, nbytes); + + c_write_buf = write_buf; + + fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, TRUE, &cfg, + H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + /* Create and close file with H5Fcreate to setup superblock */ + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + + /* Re-open file through H5FDopen for direct writes */ + file_ptr = H5FDopen(SUBF_FILENAME, H5F_ACC_RDWR, fapl_id, HADDR_UNDEF); + VRFY(file_ptr, "H5FDopen succeeded"); + + /* + * Get the current file size to see where we can safely + * write to in the file without overwriting the superblock + */ + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + file_size = (h5_stat_size_t)file_info.st_size; + + H5_CHECK_OVERFLOW(file_size, h5_stat_size_t, haddr_t); + file_end_addr = (haddr_t)file_size; + + /* Set independent I/O on DXPL */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Set EOA for following write call */ + VRFY((H5FDset_eoa(file_ptr, H5FD_MEM_DEFAULT, file_end_addr + nbytes) >= 0), + "H5FDset_eoa succeeded"); + + /* + * Write "number of IOCs" X "stripe size" bytes to the file + * and ensure that we have "number of IOCs" subfiles, each + * with a size of at least "stripe size" bytes. The first + * (few) subfile(s) may be a bit larger due to file metadata. 
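+ * + * (Worked example, assuming the usual round-robin layout in which a file offset maps to subfile (offset / stripe size) modulo subfile count: with a stripe size of 1 MiB and 2 subfiles, a contiguous 2 MiB write at offset 0 places bytes [0, 1 MiB) in subfile 1 and [1 MiB, 2 MiB) in subfile 2, so each subfile grows by one full stripe.)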
+ */ + write_addr = file_end_addr; + write_status = H5FDwrite(file_ptr, H5FD_MEM_DRAW, dxpl_id, write_addr, nbytes, c_write_buf); + VRFY((write_status >= 0), "H5FDwrite succeeded"); + + file_end_addr += nbytes; + + VRFY((H5FDtruncate(file_ptr, dxpl_id, 0) >= 0), "H5FDtruncate succeeded"); + + for (int j = 0; j < num_subfiles; j++) { + h5_stat_size_t subfile_size; + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + + /* Set EOA for following write call */ + VRFY((H5FDset_eoa(file_ptr, H5FD_MEM_DEFAULT, file_end_addr + nbytes) >= 0), + "H5FDset_eoa succeeded"); + + /* + * Write another round of "number of IOCs" X "stripe size" + * bytes to the file using vector I/O and ensure we have + * "number of IOCs" subfiles, each with a size of at least + * 2 * "stripe size" bytes. The first (few) subfile(s) may + * be a bit larger due to file metadata. + */ + H5FD_mem_t write_type = H5FD_MEM_DRAW; + write_addr = file_end_addr; + write_status = + H5FDwrite_vector(file_ptr, dxpl_id, 1, &write_type, &write_addr, &nbytes, &c_write_buf); + VRFY((write_status >= 0), "H5FDwrite_vector succeeded"); + + VRFY((H5FDtruncate(file_ptr, dxpl_id, 0) >= 0), "H5FDtruncate succeeded"); + + for (int j = 0; j < num_subfiles; j++) { + h5_stat_size_t subfile_size; + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((subfile_size >= 2 * cfg.stripe_size), "File size verification succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + + HDfree(write_buf); + write_buf = NULL; + + VRFY((H5FDclose(file_ptr) >= 0), "H5FDclose succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + /* Next, try I/O with all ranks */ + + 
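/* cfg.ioc_selection and cfg.stripe_size carry over from the single-rank phase above; only the stripe count changes for the all-ranks phase */ +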
cfg.stripe_count = num_iocs_g; + + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + /* Create and close file with H5Fcreate to setup superblock */ + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + + /* Re-open file through H5FDopen for direct writes */ + file_ptr = H5FDopen(SUBF_FILENAME, H5F_ACC_RDWR, fapl_id, HADDR_UNDEF); + VRFY(file_ptr, "H5FDopen succeeded"); + + num_subfiles = num_iocs_g; + num_digits = (int)(HDlog10(num_subfiles) + 1); + + nbytes = (size_t)(cfg.stripe_size * num_subfiles); + + write_buf = HDmalloc(nbytes); + VRFY(write_buf, "HDmalloc succeeded"); + + HDmemset(write_buf, 255, nbytes); + + c_write_buf = write_buf; + + /* + * Get the current file size to see where we can safely + * write to in the file without overwriting the superblock + */ + if (MAINPROCESS) { + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + file_size = (h5_stat_size_t)file_info.st_size; + + H5_CHECK_OVERFLOW(file_size, h5_stat_size_t, int64_t); + file_size64 = (int64_t)file_size; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&file_size64, 1, MPI_INT64_T, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + + H5_CHECK_OVERFLOW(file_size64, int64_t, haddr_t); + file_end_addr = (haddr_t)file_size64; + + /* Set independent I/O on DXPL */ + VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_INDEPENDENT) >= 0), "H5Pset_dxpl_mpio succeeded"); + + /* Set EOA for following write call */ + VRFY((H5FDset_eoa(file_ptr, H5FD_MEM_DEFAULT, file_end_addr + ((size_t)mpi_size * nbytes)) >= 0), + "H5FDset_eoa succeeded"); + + /* + * Write "number of IOCs" X "stripe size" bytes to the file + * from each rank and ensure that we have "number of IOCs" + * subfiles, each with a size of at least "mpi size" * "stripe size" + * bytes. The first (few) subfile(s) may be a bit larger + * due to file metadata. 
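+ * + * (Worked example: with 2 subfiles, stripe size S and 4 ranks, each rank writes 2*S bytes at disjoint offsets rank * 2*S; round-robin striping then deposits 4 stripes of S bytes in each subfile, i.e. at least mpi_size * S bytes per subfile.)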
+ */ + write_addr = file_end_addr + ((size_t)mpi_rank * nbytes); + write_status = H5FDwrite(file_ptr, H5FD_MEM_DRAW, dxpl_id, write_addr, nbytes, c_write_buf); + VRFY((write_status >= 0), "H5FDwrite succeeded"); + + file_end_addr += ((size_t)mpi_size * nbytes); + + VRFY((H5FDtruncate(file_ptr, dxpl_id, 0) >= 0), "H5FDtruncate succeeded"); + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + if (MAINPROCESS) { + FILE *subfile_ptr; + + for (int j = 0; j < num_subfiles; j++) { + h5_stat_size_t subfile_size; + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((subfile_size >= (mpi_size * cfg.stripe_size)), "File size verification succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + /* Set EOA for following write call */ + VRFY((H5FDset_eoa(file_ptr, H5FD_MEM_DEFAULT, file_end_addr + ((size_t)mpi_size * nbytes)) >= 0), + "H5FDset_eoa succeeded"); + + /* + * Write another round of "number of IOCs" X "stripe size" + * bytes to the file from each rank using vector I/O and + * ensure we have "number of IOCs" subfiles, each with a + * size of at least 2 * "mpi size" * "stripe size" bytes. + * The first (few) subfile(s) may be a bit larger due to + * file metadata. 
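+ * + * (H5FDwrite_vector takes parallel arrays of memory types, file addresses, sizes, and buffers; with a count of 1, as used here, it should behave exactly like the plain H5FDwrite call above -- which is what this second round checks.)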
+ */ + H5FD_mem_t write_type = H5FD_MEM_DRAW; + write_addr = file_end_addr + ((size_t)mpi_rank * nbytes); + write_status = + H5FDwrite_vector(file_ptr, dxpl_id, 1, &write_type, &write_addr, &nbytes, &c_write_buf); + VRFY((write_status >= 0), "H5FDwrite_vector succeeded"); + + VRFY((H5FDtruncate(file_ptr, dxpl_id, 0) >= 0), "H5FDtruncate succeeded"); + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + if (MAINPROCESS) { + FILE *subfile_ptr; + + for (int j = 0; j < num_subfiles; j++) { + h5_stat_size_t subfile_size; + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((subfile_size >= (2 * mpi_size * cfg.stripe_size)), "File size verification succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + } + + VRFY((H5FDclose(file_ptr) >= 0), "H5FDclose succeeded"); + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + HDfree(write_buf); + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + } + + HDfree(tmp_filename); + + VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef SUBF_NITER + +/* + * Test that opening a file with a different stripe + * size/count than was used when creating the file + * results in the original stripe size/count being + * used. As there is currently no API to check the + * exact values used, we rely on the assumption that + * using a different stripe size/count would result + * in data verification failures. + */ +#define SUBF_FILENAME "test_subfiling_read_different_stripe_sizes.h5" +#define SUBF_HDF5_TYPE H5T_NATIVE_INT +#define SUBF_C_TYPE int +static void +test_read_different_stripe_size(void) +{ + H5FD_subfiling_params_t cfg; + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + size_t target_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + char *tmp_filename = NULL; + void *buf = NULL; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("file re-opening with different stripe size"); + + tmp_filename = HDmalloc(PATH_MAX); + VRFY(tmp_filename, "HDmalloc succeeded"); + + /* Use a 1MiB stripe size and a subfile for each IOC */ + cfg.ioc_selection = SELECT_IOC_ONE_PER_NODE; + cfg.stripe_size = (stripe_size_g > 0) ? 
stripe_size_g : 1048576; + cfg.stripe_count = num_iocs_g; + + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + target_size = (size_t)cfg.stripe_size; + + /* Nudge stripe size to be multiple of C type size */ + if ((target_size % sizeof(SUBF_C_TYPE)) != 0) + target_size += sizeof(SUBF_C_TYPE) - (target_size % sizeof(SUBF_C_TYPE)); + + target_size *= (size_t)mpi_size; + + VRFY(((target_size % sizeof(SUBF_C_TYPE)) == 0), "target size check succeeded"); + + dset_dims[0] = (hsize_t)(target_size / sizeof(SUBF_C_TYPE)); + + fspace_id = H5Screate_simple(1, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Select hyperslab */ + count[0] = dset_dims[0] / (hsize_t)mpi_size; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDmalloc(count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDmalloc succeeded"); + + for (size_t i = 0; i < count[0]; i++) + ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); + + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset write succeeded"); + + HDfree(buf); + buf = NULL; + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + /* Ensure all the subfiles are present */ + if (MAINPROCESS) { + h5_stat_t file_info; + FILE *subfile_ptr; + int num_subfiles = cfg.stripe_count; + int num_digits = (int)(HDlog10(num_subfiles) + 1); + + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + for (int j = 0; j < num_subfiles; j++) { + h5_stat_size_t subfile_size; + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + subfile_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((subfile_size >= cfg.stripe_size), "File size verification succeeded"); + } + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + /* Add a bit to the stripe size and specify a few more subfiles */ + cfg.stripe_size += (cfg.stripe_size / 2); + cfg.stripe_count *= 2; + + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, TRUE, &cfg, H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + dset_id = H5Dopen2(file_id, "DSET", H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + fspace_id = H5Dget_space(dset_id); + VRFY((fspace_id >= 0), "Dataspace retrieval succeeded"); + + /* Select 
hyperslab */ + count[0] = dset_dims[0] / (hsize_t)mpi_size; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDcalloc(1, count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset read succeeded"); + + for (size_t i = 0; i < count[0]; i++) { + SUBF_C_TYPE buf_value = ((SUBF_C_TYPE *)buf)[i]; + + VRFY((buf_value == (SUBF_C_TYPE)((size_t)mpi_rank + i)), "data verification succeeded"); + } + + HDfree(buf); + buf = NULL; + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + /* Ensure only the original subfiles are present */ + if (MAINPROCESS) { + h5_stat_t file_info; + FILE *subfile_ptr; + int num_subfiles = cfg.stripe_count; + int num_digits = (int)(HDlog10(num_subfiles / 2) + 1); + + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + for (int j = 0; j < num_subfiles; j++) { + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, j + 1, num_subfiles / 2); + + if (j < (num_subfiles / 2)) { + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + } + else { + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + } + } + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + HDfree(tmp_filename); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef SUBF_HDF5_TYPE +#undef SUBF_C_TYPE + +/* + * Test that everything works correctly when a file is + * pre-created on rank 0 with a specified target number + * of subfiles and then read back on all ranks. + */ +#define SUBF_FILENAME "test_subfiling_precreate_rank_0.h5" +#define SUBF_HDF5_TYPE H5T_NATIVE_INT +#define SUBF_C_TYPE int +static void +test_subfiling_precreate_rank_0(void) +{ + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + size_t target_size; + size_t n_elements_per_rank; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *buf = NULL; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("file pre-creation on rank 0"); + + /* Calculate target size for dataset to stripe it across available IOCs */ + target_size = (stripe_size_g > 0) ? 
(size_t)stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + + /* Nudge stripe size to be multiple of C type size */ + if ((target_size % sizeof(SUBF_C_TYPE)) != 0) + target_size += sizeof(SUBF_C_TYPE) - (target_size % sizeof(SUBF_C_TYPE)); + + target_size *= (size_t)mpi_size; + + VRFY(((target_size % sizeof(SUBF_C_TYPE)) == 0), "target size check succeeded"); + + if (stripe_size_g > 0) { + VRFY((target_size >= (size_t)stripe_size_g), "target size check succeeded"); + } + else { + VRFY((target_size >= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE), "target size check succeeded"); + } + + dset_dims[0] = (hsize_t)(target_size / sizeof(SUBF_C_TYPE)); + n_elements_per_rank = (dset_dims[0] / (size_t)mpi_size); + + /* Create and populate file on rank 0 only */ + if (MAINPROCESS) { + H5FD_subfiling_params_t cfg; + h5_stat_size_t file_size; + h5_stat_t file_info; + FILE *subfile_ptr; + char *tmp_filename = NULL; + int num_subfiles; + int num_digits; + + /* Create a file consisting of 1 subfile per application I/O concentrator */ + cfg.ioc_selection = SELECT_IOC_ONE_PER_NODE; + cfg.stripe_size = (stripe_size_g > 0) ? stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + cfg.stripe_count = num_iocs_g; + + fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, TRUE, &cfg, + H5FD_IOC_DEFAULT_THREAD_POOL_SIZE); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + fspace_id = H5Screate_simple(1, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dset_id = + H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + buf = HDmalloc(dset_dims[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDmalloc succeeded"); + + for (size_t i = 0; i < dset_dims[0]; i++) + ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((i / n_elements_per_rank) + (i % n_elements_per_rank)); + + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset write succeeded"); + + HDfree(buf); + buf = NULL; + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + + /* + * Ensure that all the subfiles are present + */ + + num_subfiles = cfg.stripe_count; + num_digits = (int)(HDlog10(num_subfiles) + 1); + + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + tmp_filename = HDmalloc(PATH_MAX); + VRFY(tmp_filename, "HDmalloc succeeded"); + + for (int i = 0; i < num_subfiles; i++) { + h5_stat_t subfile_info; + + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, i + 1, num_subfiles); + + /* Ensure file exists */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr, "HDfopen on subfile succeeded"); + VRFY((HDfclose(subfile_ptr) >= 0), "HDfclose on subfile succeeded"); + + /* Check file size */ + VRFY((HDstat(tmp_filename, &subfile_info) >= 0), "HDstat succeeded"); + file_size = (h5_stat_size_t)subfile_info.st_size; + + VRFY((file_size >= cfg.stripe_size), "File size verification succeeded"); + } + + /* Verify that there aren't too many subfiles */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino, num_digits, num_subfiles + 1, 
num_subfiles); + + /* Ensure file doesn't exist */ + subfile_ptr = HDfopen(tmp_filename, "r"); + VRFY(subfile_ptr == NULL, "HDfopen on subfile correctly failed"); + + HDfree(tmp_filename); + tmp_filename = NULL; + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + /* Open the file on all ranks */ + + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + dset_id = H5Dopen2(file_id, "DSET", H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + fspace_id = H5Dget_space(dset_id); + VRFY((fspace_id >= 0), "Dataset dataspace retrieval succeeded"); + + /* Select hyperslab */ + count[0] = n_elements_per_rank; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDcalloc(1, count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset read succeeded"); + + for (size_t i = 0; i < n_elements_per_rank; i++) { + SUBF_C_TYPE buf_value = ((SUBF_C_TYPE *)buf)[i]; + + VRFY((buf_value == (SUBF_C_TYPE)((size_t)mpi_rank + i)), "data verification succeeded"); + } + + HDfree(buf); + buf = NULL; + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef SUBF_HDF5_TYPE +#undef SUBF_C_TYPE + +/* + * Test to check that an HDF5 file created with the + * Subfiling VFD can be read back with a single MPI + * rank + */ +#define SUBF_FILENAME "test_subfiling_write_many_read_one.h5" +#define SUBF_HDF5_TYPE H5T_NATIVE_INT +#define SUBF_C_TYPE int +static void +test_subfiling_write_many_read_one(void) +{ + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + size_t target_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *buf = NULL; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("reading back file with single MPI rank"); + + /* Get a default Subfiling FAPL */ + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + /* Create file on all ranks */ + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + /* Calculate target size for dataset to stripe it across available IOCs */ + target_size = (stripe_size_g > 0) ? 
(size_t)stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + + /* Nudge stripe size to be multiple of C type size */ + if ((target_size % sizeof(SUBF_C_TYPE)) != 0) + target_size += sizeof(SUBF_C_TYPE) - (target_size % sizeof(SUBF_C_TYPE)); + + target_size *= (size_t)mpi_size; + + VRFY(((target_size % sizeof(SUBF_C_TYPE)) == 0), "target size check succeeded"); + + if (stripe_size_g > 0) { + VRFY((target_size >= (size_t)stripe_size_g), "target size check succeeded"); + } + else { + VRFY((target_size >= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE), "target size check succeeded"); + } + + dset_dims[0] = (hsize_t)(target_size / sizeof(SUBF_C_TYPE)); + + fspace_id = H5Screate_simple(1, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Select hyperslab */ + count[0] = dset_dims[0] / (hsize_t)mpi_size; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDmalloc(count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDmalloc succeeded"); + + for (size_t i = 0; i < count[0]; i++) + ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); + + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset write succeeded"); + + HDfree(buf); + buf = NULL; + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + if (MAINPROCESS) { + fapl_id = create_subfiling_ioc_fapl(MPI_COMM_SELF, MPI_INFO_NULL, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + dset_id = H5Dopen2(file_id, "DSET", H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + buf = HDcalloc(1, target_size); + VRFY(buf, "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + "Dataset read succeeded"); + + for (size_t i = 0; i < (size_t)mpi_size; i++) { + for (size_t j = 0; j < count[0]; j++) { + SUBF_C_TYPE buf_value = ((SUBF_C_TYPE *)buf)[(i * count[0]) + j]; + + VRFY((buf_value == (SUBF_C_TYPE)(j + i)), "data verification succeeded"); + } + } + + HDfree(buf); + buf = NULL; + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef SUBF_HDF5_TYPE +#undef SUBF_C_TYPE + +/* + * Test to check that an HDF5 file created with the + * Subfiling VFD can be read back with less MPI ranks + * than the file was written with + */ +#define SUBF_FILENAME "test_subfiling_write_many_read_few.h5" +#define SUBF_HDF5_TYPE H5T_NATIVE_INT +#define SUBF_C_TYPE int +static void +test_subfiling_write_many_read_few(void) +{ + MPI_Comm sub_comm = MPI_COMM_NULL; + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + hbool_t reading_file = 
FALSE; + size_t target_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *buf = NULL; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("reading back file with fewer MPI ranks than written with"); + + /* + * Skip this test for an MPI communicator size of 1, + * as the test wouldn't really be meaningful + */ + if (mpi_size == 1) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + /* Get a default Subfiling FAPL */ + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + /* Create file on all ranks */ + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + VRFY((file_id >= 0), "H5Fcreate succeeded"); + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + /* Calculate target size for dataset to stripe it across available IOCs */ + target_size = (stripe_size_g > 0) ? (size_t)stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + + /* Nudge stripe size to be multiple of C type size */ + if ((target_size % sizeof(SUBF_C_TYPE)) != 0) + target_size += sizeof(SUBF_C_TYPE) - (target_size % sizeof(SUBF_C_TYPE)); + + target_size *= (size_t)mpi_size; + + VRFY(((target_size % sizeof(SUBF_C_TYPE)) == 0), "target size check succeeded"); + + if (stripe_size_g > 0) { + VRFY((target_size >= (size_t)stripe_size_g), "target size check succeeded"); + } + else { + VRFY((target_size >= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE), "target size check succeeded"); + } + + dset_dims[0] = (hsize_t)(target_size / sizeof(SUBF_C_TYPE)); + + fspace_id = H5Screate_simple(1, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Select hyperslab */ + count[0] = dset_dims[0] / (hsize_t)mpi_size; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDmalloc(count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDmalloc succeeded"); + + for (size_t i = 0; i < count[0]; i++) + ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); + + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset write succeeded"); + + HDfree(buf); + buf = NULL; + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + /* + * If only using 1 node, read file back with a + * few ranks from that node. 
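(For example, with 8 ranks on one node the color logic below selects ranks 0 and 1 -- those with mpi_rank < mpi_size / 4 -- as the readers.) + *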
Otherwise, read file + * back with 1 MPI rank per node + */ + if (num_nodes_g == 1) { + int color; + + if (mpi_size < 2) { + color = 1; + } + else if (mpi_size < 4) { + color = (mpi_rank < (mpi_size / 2)); + } + else { + color = (mpi_rank < (mpi_size / 4)); + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Comm_split(comm_g, color, mpi_rank, &sub_comm); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Comm_split succeeded"); + } + + if (color) + reading_file = TRUE; + } + else { + if (node_local_rank == 0) { + sub_comm = ioc_comm; + reading_file = TRUE; + } + } + + if (reading_file) { + fapl_id = create_subfiling_ioc_fapl(sub_comm, MPI_INFO_NULL, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, fapl_id); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + dset_id = H5Dopen2(file_id, "DSET", H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + buf = HDcalloc(1, target_size); + VRFY(buf, "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + "Dataset read succeeded"); + + for (size_t i = 0; i < (size_t)mpi_size; i++) { + for (size_t j = 0; j < count[0]; j++) { + SUBF_C_TYPE buf_value = ((SUBF_C_TYPE *)buf)[(i * count[0]) + j]; + + VRFY((buf_value == (SUBF_C_TYPE)(j + i)), "data verification succeeded"); + } + } + + HDfree(buf); + buf = NULL; + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + } + + if ((sub_comm != MPI_COMM_NULL) && (num_nodes_g == 1)) { + mpi_code_g = MPI_Comm_free(&sub_comm); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Comm_free succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + + CHECK_PASSED(); +} +#undef SUBF_FILENAME +#undef SUBF_HDF5_TYPE +#undef SUBF_C_TYPE + +/* + * Test that the subfiling file can be read with the + * sec2 driver after being fused back together with + * the h5fuse utility + */ +#define SUBF_FILENAME "test_subfiling_h5fuse.h5" +#define SUBF_HDF5_TYPE H5T_NATIVE_INT +#define SUBF_C_TYPE int +static void +test_subfiling_h5fuse(void) +{ + hsize_t start[1]; + hsize_t count[1]; + hsize_t dset_dims[1]; + size_t target_size; + hid_t file_id = H5I_INVALID_HID; + hid_t fapl_id = H5I_INVALID_HID; + hid_t dset_id = H5I_INVALID_HID; + hid_t fspace_id = H5I_INVALID_HID; + void *buf = NULL; + int skip_test = 0; + + curr_nerrors = nerrors; + + if (MAINPROCESS) + TESTING_2("h5fuse utility"); + +#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) + + /* + * Check if h5fuse script exists in current directory; + * Skip test if it doesn't + */ + if (MAINPROCESS) { + FILE *h5fuse_script; + + h5fuse_script = HDfopen("h5fuse.sh", "r"); + if (h5fuse_script) + HDfclose(h5fuse_script); + else + skip_test = 1; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&skip_test, 1, MPI_INT, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + + if (skip_test) { + if (MAINPROCESS) + SKIPPED(); + return; + } + + /* Get a default Subfiling FAPL */ + fapl_id = create_subfiling_ioc_fapl(comm_g, info_g, FALSE, NULL, 0); + VRFY((fapl_id >= 0), "FAPL creation succeeded"); + + /* Create file on all ranks */ + file_id = H5Fcreate(SUBF_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id); + 
VRFY((file_id >= 0), "H5Fcreate succeeded"); + + /* Calculate target size for dataset to stripe it across available IOCs */ + target_size = (stripe_size_g > 0) ? (size_t)stripe_size_g : H5FD_SUBFILING_DEFAULT_STRIPE_SIZE; + + /* Nudge stripe size to be multiple of C type size */ + if ((target_size % sizeof(SUBF_C_TYPE)) != 0) + target_size += sizeof(SUBF_C_TYPE) - (target_size % sizeof(SUBF_C_TYPE)); + + target_size *= (size_t)mpi_size; + + VRFY(((target_size % sizeof(SUBF_C_TYPE)) == 0), "target size check succeeded"); + + if (stripe_size_g > 0) { + VRFY((target_size >= (size_t)stripe_size_g), "target size check succeeded"); + } + else { + VRFY((target_size >= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE), "target size check succeeded"); + } + + dset_dims[0] = (hsize_t)(target_size / sizeof(SUBF_C_TYPE)); + + fspace_id = H5Screate_simple(1, dset_dims, NULL); + VRFY((fspace_id >= 0), "H5Screate_simple succeeded"); + + dset_id = H5Dcreate2(file_id, "DSET", SUBF_HDF5_TYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset creation succeeded"); + + /* Select hyperslab */ + count[0] = dset_dims[0] / (hsize_t)mpi_size; + start[0] = (hsize_t)mpi_rank * count[0]; + VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) >= 0), + "H5Sselect_hyperslab succeeded"); + + buf = HDmalloc(count[0] * sizeof(SUBF_C_TYPE)); + VRFY(buf, "HDmalloc succeeded"); + + for (size_t i = 0; i < count[0]; i++) + ((SUBF_C_TYPE *)buf)[i] = (SUBF_C_TYPE)((size_t)mpi_rank + i); + + VRFY((H5Dwrite(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, fspace_id, H5P_DEFAULT, buf) >= 0), + "Dataset write succeeded"); + + HDfree(buf); + buf = NULL; + + VRFY((H5Sclose(fspace_id) >= 0), "File dataspace close succeeded"); + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + + if (MAINPROCESS) { + h5_stat_t file_info; + pid_t pid = 0; + pid_t tmppid; + int status; + + pid = HDfork(); + VRFY(pid >= 0, "HDfork succeeded"); + + if (pid == 0) { + char *tmp_filename; + char *args[6]; + + tmp_filename = HDmalloc(PATH_MAX); + VRFY(tmp_filename, "HDmalloc succeeded"); + + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + + /* Generate name for configuration file */ + HDsnprintf(tmp_filename, PATH_MAX, H5FD_SUBFILING_CONFIG_FILENAME_TEMPLATE, SUBF_FILENAME, + (uint64_t)file_info.st_ino); + + args[0] = HDstrdup("env"); + args[1] = HDstrdup("sh"); + args[2] = HDstrdup("h5fuse.sh"); + args[3] = HDstrdup("-f"); + args[4] = tmp_filename; + args[5] = NULL; + + /* Call h5fuse script from MPI rank 0 */ + HDexecvp("env", args); + } + else { + tmppid = HDwaitpid(pid, &status, 0); + VRFY(tmppid >= 0, "HDwaitpid succeeded"); + + if (WIFEXITED(status)) { + int ret; + + if ((ret = WEXITSTATUS(status)) != 0) { + HDprintf("h5fuse process exited with error code %d\n", ret); + HDfflush(stdout); + MPI_Abort(comm_g, -1); + } + } + else { + HDprintf("h5fuse process terminated abnormally\n"); + HDfflush(stdout); + MPI_Abort(comm_g, -1); + } + } + + /* Verify the size of the fused file */ + VRFY((HDstat(SUBF_FILENAME, &file_info) >= 0), "HDstat succeeded"); + VRFY(((size_t)file_info.st_size >= target_size), "File size verification succeeded"); + + /* Re-open file with sec2 driver and verify the data */ + file_id = H5Fopen(SUBF_FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT); + VRFY((file_id >= 0), "H5Fopen succeeded"); + + dset_id = H5Dopen2(file_id, "DSET", H5P_DEFAULT); + VRFY((dset_id >= 0), "Dataset open succeeded"); + + buf = HDcalloc(1, target_size); 
+ VRFY(buf, "HDcalloc succeeded"); + + VRFY((H5Dread(dset_id, SUBF_HDF5_TYPE, H5S_BLOCK, H5S_ALL, H5P_DEFAULT, buf) >= 0), + "Dataset read succeeded"); + + for (size_t i = 0; i < (size_t)mpi_size; i++) { + for (size_t j = 0; j < count[0]; j++) { + SUBF_C_TYPE buf_value = ((SUBF_C_TYPE *)buf)[(i * count[0]) + j]; + + VRFY((buf_value == (SUBF_C_TYPE)(j + i)), "data verification succeeded"); + } + } + + HDfree(buf); + buf = NULL; + + VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded"); + VRFY((H5Fclose(file_id) >= 0), "File close succeeded"); + } + + mpi_code_g = MPI_Barrier(comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Barrier succeeded"); + + H5E_BEGIN_TRY + { + H5Fdelete(SUBF_FILENAME, fapl_id); + } + H5E_END_TRY; + + VRFY((H5Pclose(fapl_id) >= 0), "FAPL close succeeded"); + + CHECK_PASSED(); +#else + SKIPPED(); +#endif +} +#undef SUBF_FILENAME +#undef SUBF_HDF5_TYPE +#undef SUBF_C_TYPE + +static void +parse_subfiling_env_vars(void) +{ + char *env_value; + + if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_STRIPE_SIZE))) { + stripe_size_g = HDstrtoll(env_value, NULL, 0); + if ((ERANGE == errno) || (stripe_size_g <= 0)) + stripe_size_g = -1; + } + + if (NULL != (env_value = HDgetenv(H5FD_SUBFILING_IOC_PER_NODE))) { + ioc_per_node_g = HDstrtol(env_value, NULL, 0); + if ((ERANGE == errno) || (ioc_per_node_g <= 0)) + ioc_per_node_g = -1; + else if (ioc_per_node_g * num_nodes_g > mpi_size) + /* + * If the number of IOCs per node from the environment + * causes the total number of IOCs to exceed the number + * of MPI ranks available, the Subfiling VFD will simply + * use all of the MPI ranks on a node as IOCs + */ + ioc_per_node_g = node_local_size; + } + + if (NULL != (env_value = HDgetenv(H5FD_IOC_THREAD_POOL_SIZE))) { + ioc_thread_pool_size_g = HDatoi(env_value); + if (ioc_thread_pool_size_g <= 0) + ioc_thread_pool_size_g = -1; + } +} + +int +main(int argc, char **argv) +{ + unsigned seed; + int required = MPI_THREAD_MULTIPLE; + int provided = 0; + + HDcompile_assert(SUBFILING_MIN_STRIPE_SIZE <= H5FD_SUBFILING_DEFAULT_STRIPE_SIZE); + + /* Initialize MPI */ + if (MPI_SUCCESS != (mpi_code_g = MPI_Init_thread(&argc, &argv, required, &provided))) { + HDprintf("MPI_Init_thread failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_rank(comm_g, &mpi_rank))) { + HDprintf("MPI_Comm_rank failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + if (provided != required) { + if (MAINPROCESS) + HDprintf("MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE\n"); + nerrors++; + goto exit; + } + + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_size(comm_g, &mpi_size))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_size failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + /* Split communicator according to node-local ranks */ + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_split_type(comm_g, MPI_COMM_TYPE_SHARED, mpi_rank, + MPI_INFO_NULL, &node_local_comm))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_split_type failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_size(node_local_comm, &node_local_size))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_size failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_rank(node_local_comm, &node_local_rank))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_rank failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + /* Get 
the number of nodes being run on */ + num_nodes_g = (node_local_rank == 0) ? 1 : 0; + if (MPI_SUCCESS != + (mpi_code_g = MPI_Allreduce(MPI_IN_PLACE, &num_nodes_g, 1, MPI_INT, MPI_SUM, comm_g))) { + if (MAINPROCESS) + HDprintf("MPI_Allreduce failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + /* + * Split communicator according to rank value across nodes. + * If the SELECT_IOC_ONE_PER_NODE IOC selection strategy is + * used, each rank with a node local rank value of 0 will + * be an IOC in the new communicator. + */ + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_split(comm_g, node_local_rank, mpi_rank, &ioc_comm))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_split failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_size(ioc_comm, &ioc_comm_size))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_size failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + if (MPI_SUCCESS != (mpi_code_g = MPI_Comm_rank(ioc_comm, &ioc_comm_rank))) { + if (MAINPROCESS) + HDprintf("MPI_Comm_rank failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + + if (H5dont_atexit() < 0) { + if (MAINPROCESS) + HDprintf("Failed to turn off atexit processing. Continue.\n"); + } + + H5open(); + + /* Enable selection I/O using internal temporary workaround */ + H5_use_selection_io_g = TRUE; + + if (MAINPROCESS) { + HDprintf("Testing Subfiling VFD functionality\n"); + } + + TestAlarmOn(); + + /* + * Obtain and broadcast seed value since ranks + * aren't guaranteed to arrive here at exactly + * the same time and could end up out of sync + * with each other in regards to random number + * generation + */ + if (mpi_rank == 0) + seed = (unsigned)time(NULL); + + if (mpi_size > 1) { + if (MPI_SUCCESS != (mpi_code_g = MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, comm_g))) { + if (MAINPROCESS) + HDprintf("MPI_Bcast failed with error code %d\n", mpi_code_g); + nerrors++; + goto exit; + } + } + + srand(seed); + + if (MAINPROCESS) + HDprintf("Using seed: %u\n\n", seed); + + /* Grab values from environment variables if set */ + parse_subfiling_env_vars(); + + /* + * Assume that we use the "one IOC per node" selection + * strategy by default, with a possibly modified + * number of IOCs per node value + */ + num_iocs_g = (ioc_per_node_g > 0) ? 
(int)ioc_per_node_g * num_nodes_g : num_nodes_g; + if (num_iocs_g > mpi_size) + num_iocs_g = mpi_size; + + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } + } + + if (MAINPROCESS) + HDputs(""); + + /* + * Set any unset Subfiling environment variables and re-run + * the tests as a quick smoke check of whether those are + * working correctly + */ + if (stripe_size_g < 0) { + int64_t stripe_size; + char tmp[64]; + + /* + * Choose a random Subfiling stripe size between + * the smallest allowed value and the default value + */ + if (mpi_rank == 0) { + stripe_size = (rand() % (H5FD_SUBFILING_DEFAULT_STRIPE_SIZE - SUBFILING_MIN_STRIPE_SIZE + 1)) + + SUBFILING_MIN_STRIPE_SIZE; + } + + if (mpi_size > 1) { + mpi_code_g = MPI_Bcast(&stripe_size, 1, MPI_INT64_T, 0, comm_g); + VRFY((mpi_code_g == MPI_SUCCESS), "MPI_Bcast succeeded"); + } + + HDsnprintf(tmp, sizeof(tmp), "%" PRId64, stripe_size); + + if (HDsetenv(H5FD_SUBFILING_STRIPE_SIZE, tmp, 1) < 0) { + if (MAINPROCESS) + HDprintf("HDsetenv failed\n"); + nerrors++; + goto exit; + } + } + if (ioc_per_node_g < 0) { + const char *ioc_per_node_str; + + if (2 * num_nodes_g <= mpi_size) + ioc_per_node_str = "2"; + else + ioc_per_node_str = "1"; + + if (HDsetenv(H5FD_SUBFILING_IOC_PER_NODE, ioc_per_node_str, 1) < 0) { + if (MAINPROCESS) + HDprintf("HDsetenv failed\n"); + nerrors++; + goto exit; + } + } + if (ioc_thread_pool_size_g < 0) { + if (HDsetenv(H5FD_IOC_THREAD_POOL_SIZE, "2", 1) < 0) { + if (MAINPROCESS) + HDprintf("HDsetenv failed\n"); + nerrors++; + goto exit; + } + } + + /* Grab values from environment variables */ + parse_subfiling_env_vars(); + + /* + * Assume that we use the "one IOC per node" selection + * strategy by default, with a possibly modified + * number of IOCs per node value + */ + num_iocs_g = (ioc_per_node_g > 0) ? (int)ioc_per_node_g * num_nodes_g : num_nodes_g; + if (num_iocs_g > mpi_size) + num_iocs_g = mpi_size; + + if (MAINPROCESS) { + HDprintf("Re-running tests with environment variables set\n"); + } + + for (size_t i = 0; i < ARRAY_SIZE(tests); i++) { + if (MPI_SUCCESS == (mpi_code_g = MPI_Barrier(comm_g))) { + (*tests[i])(); + } + else { + if (MAINPROCESS) + MESG("MPI_Barrier failed"); + nerrors++; + } + } + + if (MAINPROCESS) + HDputs(""); + + if (nerrors) + goto exit; + + if (MAINPROCESS) + HDputs("All Subfiling VFD tests passed\n"); + +exit: + if (nerrors) { + if (MAINPROCESS) + HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors, nerrors > 1 ? "S" : ""); + } + + TestAlarmOff(); + + H5close(); + + if (MPI_COMM_WORLD != ioc_comm) + MPI_Comm_free(&ioc_comm); + if (MPI_COMM_WORLD != node_local_comm) + MPI_Comm_free(&node_local_comm); + + MPI_Finalize(); + + HDexit(nerrors ? EXIT_FAILURE : EXIT_SUCCESS); +} + +#else /* H5_HAVE_SUBFILING_VFD */ + +int +main(void) +{ + h5_reset(); + HDprintf("Testing Subfiling VFD functionality\n"); + HDprintf("SKIPPED - Subfiling VFD not built\n"); + HDexit(EXIT_SUCCESS); +} + +#endif /* H5_HAVE_SUBFILING_VFD */ diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c new file mode 100644 index 0000000..512aa5b --- /dev/null +++ b/testpar/t_vfd.c @@ -0,0 +1,4460 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. 
The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: John Mainzer + * + * This file is a catchall for parallel VFD tests. + */ + +#include "testphdf5.h" + +#ifdef H5_HAVE_SUBFILING_VFD +#include "H5FDsubfiling.h" +#include "H5FDioc.h" +#endif + +/* Must be a power of 2. Reducing it below 1024 may cause problems */ +#define INTS_PER_RANK 1024 + +/* global variable declarations: */ + +static MPI_Comm comm = MPI_COMM_WORLD; +static MPI_Info info = MPI_INFO_NULL; + +hbool_t pass = TRUE; /* set to FALSE on error */ +hbool_t disp_failure_mssgs = TRUE; /* global force display of failure messages */ +const char *failure_mssg = NULL; + +const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/ + "mpio_vfd_test_file_1", /*1*/ + "mpio_vfd_test_file_2", /*2*/ + "mpio_vfd_test_file_3", /*3*/ + "mpio_vfd_test_file_4", /*4*/ + "mpio_vfd_test_file_5", /*5*/ + "mpio_vfd_test_file_6", /*6*/ + "subfiling_vfd_test_file_0", /*7*/ + "subfiling_vfd_test_file_1", /*8*/ + "subfiling_vfd_test_file_2", /*9*/ + "subfiling_vfd_test_file_3", /*10*/ + "subfiling_vfd_test_file_4", /*11*/ + "subfiling_vfd_test_file_5", /*12*/ + "subfiling_vfd_test_file_6", /*13*/ + NULL}; + +/* File Test Images + * + * Pointers to dynamically allocated buffers of size + * INTS_PER_RANK * sizeof(int32_t) * mpi_size(). These + * buffers are used to put the test file in a known + * state, and to test if the test file contains the + * expected data. 
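+ *
+ * For example, with mpi_size == 4 and INTS_PER_RANK == 1024, each
+ * buffer holds 4096 int32_t values: increasing_fi_buf[i] == i,
+ * decreasing_fi_buf[i] == 4096 - i, negative_fi_buf[i] == -i, and
+ * zero_fi_buf[i] == 0, with read_fi_buf used as the target of test
+ * reads.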
+ */
+
+int32_t *increasing_fi_buf = NULL;
+int32_t *decreasing_fi_buf = NULL;
+int32_t *negative_fi_buf = NULL;
+int32_t *zero_fi_buf = NULL;
+int32_t *read_fi_buf = NULL;
+
+/* local utility function declarations */
+
+static unsigned alloc_and_init_file_images(int mpi_size);
+static void free_file_images(void);
+static void setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa,
+                                H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr);
+static void takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr,
+                                   hid_t *dxpl_id_ptr);
+
+/* test functions */
+static unsigned vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+
+static unsigned vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+
+/****************************************************************************/
+/***************************** Utility Functions ****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function:    alloc_and_init_file_images
+ *
+ * Purpose:     Allocate and initialize the global buffers used to construct,
+ *              load and verify test file contents.
+ *
+ * Return:      FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer:  John Mainzer
+ *              3/25/26
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+alloc_and_init_file_images(int mpi_size)
+{
+    const char *fcn_name = "alloc_and_init_file_images()";
+    int cp = 0;
+    int buf_len;
+    size_t buf_size;
+    int i;
+    hbool_t show_progress = FALSE;
+
+    pass = TRUE;
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* allocate the file image buffers */
+    if (pass) {
+
+        buf_len = INTS_PER_RANK * mpi_size;
+        buf_size = sizeof(int32_t) * (size_t)INTS_PER_RANK * (size_t)mpi_size;
+
+        increasing_fi_buf = (int32_t *)HDmalloc(buf_size);
+        decreasing_fi_buf = (int32_t *)HDmalloc(buf_size);
+        negative_fi_buf = (int32_t *)HDmalloc(buf_size);
+        zero_fi_buf = (int32_t *)HDmalloc(buf_size);
+        read_fi_buf = (int32_t *)HDmalloc(buf_size);
+
+        if ((!increasing_fi_buf) || (!decreasing_fi_buf) || (!negative_fi_buf) || (!zero_fi_buf) ||
+            (!read_fi_buf)) {
+
+            pass = FALSE;
+            failure_mssg = "Can't allocate one or more file image buffers.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* initialize the file image buffers */
+    if (pass) {
+
+        for (i = 0; i < buf_len; i++) {
+
+            increasing_fi_buf[i] = i;
+            decreasing_fi_buf[i] = buf_len - i;
+            negative_fi_buf[i] = -i;
+            zero_fi_buf[i] = 0;
+            read_fi_buf[i] = 0;
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* discard file image buffers if there was an error */
+    if (!pass) {
+
+        free_file_images();
+    }
+
+    return !pass;
+
+} /* alloc_and_init_file_images() */
+
+/*-------------------------------------------------------------------------
+ * Function:    free_file_images
+ *
+ * Purpose:     Deallocate any global file image buffers that exist, and
+ *              set their associated pointers to NULL.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              1/25/17
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+free_file_images(void)
+{
+    if (increasing_fi_buf) {
+
+        HDfree(increasing_fi_buf);
+        increasing_fi_buf = NULL;
+    }
+
+    if (decreasing_fi_buf) {
+
+        HDfree(decreasing_fi_buf);
+        decreasing_fi_buf = NULL;
+    }
+
+    if (negative_fi_buf) {
+
+        HDfree(negative_fi_buf);
+        negative_fi_buf = NULL;
+    }
+
+    if (zero_fi_buf) {
+
+        HDfree(zero_fi_buf);
+        zero_fi_buf = NULL;
+    }
+
+    if (read_fi_buf) {
+
+        HDfree(read_fi_buf);
+        read_fi_buf = NULL;
+    }
+
+    return;
+
+} /* free_file_images() */
+
+/*-------------------------------------------------------------------------
+ * Function:    setup_vfd_test_file
+ *
+ * Purpose:     Create / open the specified test file with the specified
+ *              VFD, and set the EOA to the specified value.
+ *
+ *              Setup the dxpl for subsequent I/O via the target VFD.
+ *
+ *              Return a pointer to the instance of H5FD_t created on
+ *              file open in *lf_ptr, and the FAPL and DXPL ids in
+ *              *fapl_id_ptr and *dxpl_id_ptr.  Similarly, copy the
+ *              "fixed" file name into file_name on exit.
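+ *
+ *              A typical invocation looks like the following sketch
+ *              (the surrounding declarations are illustrative, and eoa
+ *              is assumed to have been computed already):
+ *
+ *                  H5FD_t *lf = NULL;
+ *                  hid_t fapl_id = -1;
+ *                  hid_t dxpl_id = -1;
+ *                  char filename[512];
+ *
+ *                  setup_vfd_test_file(0, filename, mpi_size,
+ *                                      H5FD_MPIO_COLLECTIVE,
+ *                                      H5FD_MPIO_COLLECTIVE_IO, "mpio",
+ *                                      eoa, &lf, &fapl_id, &dxpl_id);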
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              3/25/26
+ *
+ * Modifications:
+ *
+ *              Updated for subfiling VFD 9/29/30
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa,
+                    H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr)
+{
+    const char *fcn_name = "setup_vfd_test_file()";
+    char filename[512];
+    int cp = 0;
+    hbool_t show_progress = FALSE;
+    hid_t fapl_id = -1;   /* file access property list ID */
+    hid_t dxpl_id = -1;   /* data access property list ID */
+    unsigned flags = 0;   /* file open flags */
+    H5FD_t *lf = NULL;    /* VFD struct ptr */
+
+    HDassert(vfd_name);
+    HDassert(lf_ptr);
+    HDassert(fapl_id_ptr);
+    HDassert(dxpl_id_ptr);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* setup the file name -- do this now, since setting up the ioc fapl requires it. This will probably
+     * change */
+    if (pass) {
+
+        if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+            pass = FALSE;
+            failure_mssg = "h5_fixname() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* setup fapl for target VFD */
+    if (pass) {
+
+        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "Can't create fapl.";
+        }
+    }
+
+    if (pass) {
+
+        if (HDstrcmp(vfd_name, "mpio") == 0) {
+
+            if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0) {
+
+                pass = FALSE;
+                failure_mssg = "Can't set mpio fapl.";
+            }
+        }
+#ifdef H5_HAVE_SUBFILING_VFD
+        else if (HDstrcmp(vfd_name, H5FD_SUBFILING_NAME) == 0) {
+
+            H5FD_subfiling_params_t shared_conf = {
+                /* ioc_selection = */ SELECT_IOC_ONE_PER_NODE,
+                /* stripe_size   = */ (INTS_PER_RANK / 2),
+                /* stripe_count  = */ 0, /* will be overwritten */
+            };
+            H5FD_subfiling_config_t subfiling_conf = {
+                /* magic         = */ H5FD_SUBFILING_FAPL_MAGIC,
+                /* version       = */ H5FD_SUBFILING_CURR_FAPL_VERSION,
+                /* ioc_fapl_id   = */ H5P_DEFAULT, /* will be overwritten */
+                /* require_ioc   = */ TRUE,
+                /* shared_cfg    = */ shared_conf,
+            };
+            H5FD_ioc_config_t ioc_config = {
+                /* magic            = */ H5FD_IOC_FAPL_MAGIC,
+                /* version          = */ H5FD_IOC_CURR_FAPL_VERSION,
+                /* thread_pool_size = */ H5FD_IOC_DEFAULT_THREAD_POOL_SIZE,
+            };
+            hid_t ioc_fapl = H5I_INVALID_HID;
+
+            if ((pass) && ((ioc_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)) {
+
+                pass = FALSE;
+                failure_mssg = "Can't create ioc fapl.";
+            }
+
+            /* set the MPI communicator and info in the IOC FAPL */
+            if (H5Pset_mpi_params(ioc_fapl, comm, info) < 0) {
+
+                pass = FALSE;
+                failure_mssg = "Can't set MPI communicator and info in IOC fapl.";
+            }
+
+            /* set the MPI communicator and info in the subfiling FAPL */
+            if (H5Pset_mpi_params(fapl_id, comm, info) < 0) {
+
+                pass = FALSE;
+                failure_mssg = "Can't set MPI communicator and info in subfiling fapl.";
+            }
+
+            HDmemset(&ioc_config, 0, sizeof(ioc_config));
+            HDmemset(&subfiling_conf, 0, sizeof(subfiling_conf));
+
+            /* Get subfiling VFD defaults */
+            if ((pass) && (H5Pget_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+                pass = FALSE;
+                failure_mssg = "Can't get subfiling VFD defaults.";
+            }
+
+            if ((pass) && (subfiling_conf.require_ioc)) {
+
+                /* Get IOC VFD defaults */
+                if ((pass) && ((H5Pget_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+                    pass = FALSE;
+                    failure_mssg = "Can't get IOC VFD defaults.";
+                }
+
+                /* Now we can set the IOC fapl. */
+                if ((pass) && ((H5Pset_fapl_ioc(ioc_fapl, &ioc_config) == FAIL))) {
+
+                    pass = FALSE;
+                    failure_mssg = "Can't set IOC fapl.";
+                }
+            }
+            else {
+
+                if ((pass) && ((H5Pset_fapl_sec2(ioc_fapl) == FAIL))) {
+
+                    pass = FALSE;
+                    failure_mssg = "Can't set sec2 fapl.";
+                }
+            }
+
+            /* Assign the IOC fapl as the underlying VFD */
+            subfiling_conf.ioc_fapl_id = ioc_fapl;
+
+            /* Now we can set the SUBFILING fapl before returning. */
+            if ((pass) && (H5Pset_fapl_subfiling(fapl_id, &subfiling_conf) == FAIL)) {
+
+                pass = FALSE;
+                failure_mssg = "Can't set subfiling fapl.";
+            }
+        }
+#endif
+        else {
+            pass = FALSE;
+            failure_mssg = "unsupported VFD";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* setup the file name */
+    if (pass) {
+
+        if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+            pass = FALSE;
+            failure_mssg = "h5_fixname() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* Open the VFD test file with the specified VFD. */
+
+    if (pass) {
+
+        flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC;
+
+        if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) {
+
+            pass = FALSE;
+            failure_mssg = "H5FDopen() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* set eoa as specified */
+
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5FDset_eoa() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    if (pass) { /* setup dxpl */
+
+        dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+        if (dxpl_id < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    if (pass) {
+
+        if (H5Pset_dxpl_mpio(dxpl_id, xfer_mode) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5Pset_dxpl_mpio() failed.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    if (pass) {
+
+        if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, coll_opt_mode) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5Pset_dxpl_mpio_collective_opt() failed.";
+        }
+    }
+
+    if (pass) { /* setup pointers with return values */
+
+        HDstrncpy(file_name, filename, 512);
+        *lf_ptr = lf;
+        *fapl_id_ptr = fapl_id;
+        *dxpl_id_ptr = dxpl_id;
+    }
+    else { /* tidy up from failure as best we can */
+
+        if (lf)
+            H5FDclose(lf);
+
+        if (fapl_id != -1)
+            H5Pclose(fapl_id);
+
+        if (dxpl_id != -1)
+            H5Pclose(dxpl_id);
+    }
+
+    return;
+
+} /* setup_vfd_test_file() */
+
+/*-------------------------------------------------------------------------
+ * Function:    takedown_vfd_test_file
+ *
+ * Purpose:     Close and delete the specified test file.  Close the
+ *              FAPL & DXPL.
+ *
+ * Return:      void
+ *
+ * Programmer:  John Mainzer
+ *              3/25/26
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr)
+{
+    const char *fcn_name = "takedown_vfd_test_file()";
+    int cp = 0;
+    hbool_t show_progress = FALSE;
+
+    HDassert(lf_ptr);
+    HDassert(fapl_id_ptr);
+    HDassert(dxpl_id_ptr);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* Close the test file if it is open, regardless of the value of pass.
+     * This should let the test program shut down more cleanly.
+     */
+
+    if (*lf_ptr) {
+
+        if (H5FDclose(*lf_ptr) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5FDclose() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* On rank 0, delete the test file.
+ */ + + /* wait for everyone to close the file */ + MPI_Barrier(comm); + + if (pass) { + + if ((mpi_rank == 0) && (HDremove(filename) < 0)) { + + pass = FALSE; + failure_mssg = "HDremove() failed.\n"; + } + } + + /* wait for the file delete to complete */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Close the fapl */ + if (H5Pclose(*fapl_id_ptr) < 0) { + + pass = FALSE; + failure_mssg = "can't close fapl.\n"; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Close the dxpl */ + if (H5Pclose(*dxpl_id_ptr) < 0) { + + pass = FALSE; + failure_mssg = "can't close dxpl.\n"; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + return; + +} /* takedown_vfd_test_file() */ + +/****************************************************************************/ +/******************************* Test Functions *****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_1() + * + * Purpose: Simple vector read test: + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) On each rank, zero the read buffer, and then read + * INTS_PER_RANK * sizeof(int32) bytes from the file + * starting at offset mpi_rank * INTS_PER_RANK * + * sizeof(int32_t) in both the file and read_fi_buf. + * Do this with a vector read containing a single + * element. + * + * Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * increasing_fi_buf. + * + * 5) Barrier + * + * 6) Close the test file. + * + * 7) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_1()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void *bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / ind I/O", + vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 1 -- %s / col op / col I/O", + vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, zero the read buffer, and then read + * INTS_PER_RANK * sizeof(int32) bytes from the file + * starting at offset mpi_rank * INTS_PER_RANK * + * sizeof(int32_t) in both the file and read_fi_buf. + * Do this with a vector read containing a single + * element. + * + * Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * increasing_fi_buf. 
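+     *
+     *    For example, on mpi_rank 1 with INTS_PER_RANK == 1024, only
+     *    read_fi_buf[1024] through read_fi_buf[2047] should match
+     *    increasing_fi_buf; every other index should remain zero.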
+ */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) { + + if (read_fi_buf[i] != 0) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (1).\n"; + break; + } + } + else { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (2).\n"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_1() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_2() + * + * Purpose: Simple vector read test with only half of ranks + * participating in each vector read. + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire decreasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) On each rank, zero the read buffer. + * + * 5) On even ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Odd ranks perform an empty read. + * + * 6) Barrier. + * + * 7) On odd ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Even ranks perform an empty read. + * + * 8) Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * decreasing_fi_buf. + * + * 9) Barrier + * + * 10) Close the test file. + * + * 11) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_2()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void *bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / ind I/O", + vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 2 -- %s / col op / col I/O", + vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire decreasing_fi_buf to + * the file. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)decreasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, zero the read buffer. */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) On even ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Odd ranks perform an empty read. 
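+     *
+     *    Note that a rank with no I/O to do still calls
+     *    H5FDread_vector() with count == 0 -- with a collective
+     *    transfer mode, every rank is expected to participate in
+     *    the call even when it reads nothing.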
+ */ + if (pass) { + + if (mpi_rank % 2 == 0) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + } + else { + + count = 0; + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) On odd ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Even ranks perform an empty read. + */ + if (pass) { + + if (mpi_rank % 2 == 1) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + } + else { + + count = 0; + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * decreasing_fi_buf. + */ + + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) { + + if (read_fi_buf[i] != 0) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (1).\n"; + break; + } + } + else { + + if (read_fi_buf[i] != decreasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (2).\n"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 9) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 10) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_2() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_3() + * + * Purpose: Verify that vector read works with multiple entries in + * the vector in each read, and that read buffers need not + * be in increasing (memory) address order. + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire negative_fi_buf to + * the file. 
+ *
+ *              3) Barrier
+ *
+ *              4) On each rank, zero the four read buffers.
+ *
+ *              5) On each rank, do a vector read from the file, with
+ *                 each rank's vector having four elements, with each
+ *                 element reading INTS_PER_RANK / 4 * sizeof(int32)
+ *                 bytes, and the reads starting at address:
+ *
+ *                     (mpi_rank * INTS_PER_RANK) * sizeof(int32_t)
+ *
+ *                     (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) *
+ *                         sizeof(int32_t)
+ *
+ *                     (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) *
+ *                         sizeof(int32_t)
+ *
+ *                     (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 4) *
+ *                         sizeof(int32_t)
+ *
+ *                 On even ranks, the targets of the reads should be
+ *                 buf_0, buf_1, buf_2, and buf_3 respectively.
+ *
+ *                 On odd ranks, the targets of the reads should be
+ *                 buf_3, buf_2, buf_1, and buf_0 respectively.
+ *
+ *                 This has the effect of ensuring that on at least
+ *                 some ranks, the read buffers are not in increasing
+ *                 address order.
+ *
+ *              6) Verify that buf_0, buf_1, buf_2, and buf_3 contain
+ *                 the expected data.  Note that this will be different
+ *                 on even vs. odd ranks.
+ *
+ *              7) Barrier.
+ *
+ *              8) Close the test file.
+ *
+ *              9) On rank 0, delete the test file.
+ *
+ * Return:      FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer:  John Mainzer
+ *              3/26/21
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+    const char *fcn_name = "vector_read_test_3()";
+    char test_title[120];
+    char filename[512];
+    int32_t buf_0[(INTS_PER_RANK / 4) + 1];
+    int32_t buf_1[(INTS_PER_RANK / 4) + 1];
+    int32_t buf_2[(INTS_PER_RANK / 4) + 1];
+    int32_t buf_3[(INTS_PER_RANK / 4) + 1];
+    haddr_t eoa;
+    hbool_t show_progress = FALSE;
+    hid_t fapl_id = -1; /* file access property list ID */
+    hid_t dxpl_id = -1; /* data access property list ID */
+    H5FD_t *lf = NULL;  /* VFD struct ptr */
+    int cp = 0;
+    int i;
+    uint32_t count;
+    H5FD_mem_t types[4];
+    haddr_t addrs[4];
+    size_t sizes[4];
+    void *bufs[4];
+
+    pass = TRUE;
+
+    if (mpi_rank == 0) {
+
+        if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / independent",
+                       vfd_name);
+        }
+        else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / ind I/O",
+                       vfd_name);
+        }
+        else {
+
+            HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 3 -- %s / col op / col I/O",
+                       vfd_name);
+        }
+
+        TESTING(test_title);
+    }
+
+    show_progress = ((show_progress) && (mpi_rank == 0));
+
+    if (show_progress)
+        HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+                            &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) Using rank zero, write the entire negative_fi_buf to
+     * the file.
+     */
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (mpi_rank == 0) {
+
+            if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)negative_fi_buf) <
+                0) {
+
+                pass = FALSE;
+                failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+            }
+        }
+    }
+
+    /* 3) Barrier */
+    MPI_Barrier(comm);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) On each rank, zero the four read buffers. */
+    if (pass) {
+
+        for (i = 0; i <= INTS_PER_RANK / 4; i++) {
+
+            buf_0[i] = 0;
+            buf_1[i] = 0;
+            buf_2[i] = 0;
+            buf_3[i] = 0;
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) On each rank, do a vector read from the file, with
+     *    each rank's vector having four elements, with each
+     *    element reading INTS_PER_RANK / 4 * sizeof(int32)
+     *    bytes, and the reads starting at address:
+     *
+     *        (mpi_rank * INTS_PER_RANK) * sizeof(int32_t)
+     *
+     *        (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) *
+     *            sizeof(int32_t)
+     *
+     *        (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) *
+     *            sizeof(int32_t)
+     *
+     *        (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 4) *
+     *            sizeof(int32_t)
+     *
+     *    On even ranks, the targets of the reads should be
+     *    buf_0, buf_1, buf_2, and buf_3 respectively.
+     *
+     *    On odd ranks, the targets of the reads should be
+     *    buf_3, buf_2, buf_1, and buf_0 respectively.
+     *
+     *    This has the effect of ensuring that on at least
+     *    some ranks, the read buffers are not in increasing
+     *    address order.
+     */
+    if (pass) {
+
+        haddr_t base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        count = 4;
+
+        types[0] = H5FD_MEM_DRAW;
+        addrs[0] = base_addr;
+        sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+        types[1] = H5FD_MEM_DRAW;
+        addrs[1] = base_addr + ((haddr_t)(INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t)));
+        sizes[1] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+        types[2] = H5FD_MEM_DRAW;
+        addrs[2] = base_addr + ((haddr_t)(INTS_PER_RANK / 2) * (haddr_t)(sizeof(int32_t)));
+        sizes[2] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+        types[3] = H5FD_MEM_DRAW;
+        addrs[3] = base_addr + ((haddr_t)(3 * INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t)));
+        sizes[3] = (size_t)INTS_PER_RANK / 4 * sizeof(int32_t);
+
+        if (mpi_rank % 2 == 0) {
+
+            bufs[0] = (void *)(&(buf_0[0]));
+            bufs[1] = (void *)(buf_1);
+            bufs[2] = (void *)(buf_2);
+            bufs[3] = (void *)(buf_3);
+        }
+        else {
+
+            bufs[0] = (void *)(&(buf_3[0]));
+            bufs[1] = (void *)(buf_2);
+            bufs[2] = (void *)(buf_1);
+            bufs[3] = (void *)(buf_0);
+        }
+
+        if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5FDread_vector() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 6) Verify that buf_0, buf_1, buf_2, and buf_3 contain
+     *    the expected data.  Note that this will be different
+     *    on even vs. odd ranks.
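+     *
+     *    Concretely: on even ranks, buf_0 should hold the first
+     *    quarter of the rank's region of negative_fi_buf and buf_3
+     *    the last quarter; on odd ranks the mapping is reversed, so
+     *    buf_3 holds the first quarter and buf_0 the last.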
+     */
+    if (pass) {
+
+        int base_index = mpi_rank * INTS_PER_RANK;
+
+        for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+            if (((mpi_rank % 2 == 0) && (buf_0[i] != negative_fi_buf[base_index + i])) ||
+                ((mpi_rank % 2 == 1) && (buf_3[i] != negative_fi_buf[base_index + i]))) {
+
+                pass = FALSE;
+                failure_mssg = "Unexpected value in buf (1).\n";
+            }
+        }
+
+        base_index += INTS_PER_RANK / 4;
+
+        for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+            if (((mpi_rank % 2 == 0) && (buf_1[i] != negative_fi_buf[base_index + i])) ||
+                ((mpi_rank % 2 == 1) && (buf_2[i] != negative_fi_buf[base_index + i]))) {
+
+                pass = FALSE;
+                failure_mssg = "Unexpected value in buf (2).\n";
+            }
+        }
+
+        base_index += INTS_PER_RANK / 4;
+
+        for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+            if (((mpi_rank % 2 == 0) && (buf_2[i] != negative_fi_buf[base_index + i])) ||
+                ((mpi_rank % 2 == 1) && (buf_1[i] != negative_fi_buf[base_index + i]))) {
+
+                pass = FALSE;
+                failure_mssg = "Unexpected value in buf (3).\n";
+            }
+        }
+
+        base_index += INTS_PER_RANK / 4;
+
+        for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+            if (((mpi_rank % 2 == 0) && (buf_3[i] != negative_fi_buf[base_index + i])) ||
+                ((mpi_rank % 2 == 1) && (buf_0[i] != negative_fi_buf[base_index + i]))) {
+
+                pass = FALSE;
+                failure_mssg = "Unexpected value in buf (4).\n";
+            }
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 7) Barrier */
+    MPI_Barrier(comm);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 8) Close the test file and delete it (on rank 0 only).
+     *    Close FAPL and DXPL.
+     */
+    takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* report results */
+    if (mpi_rank == 0) {
+
+        if (pass) {
+
+            PASSED();
+        }
+        else {
+
+            H5_FAILED();
+
+            if ((disp_failure_mssgs) || (show_progress)) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
+        }
+    }
+
+    return (!pass);
+
+} /* vector_read_test_3() */
+
+/*-------------------------------------------------------------------------
+ * Function:    vector_read_test_4()
+ *
+ * Purpose:     Test vector I/O reads with vectors of different lengths
+ *              and entry sizes across the ranks.  Vectors are not, in
+ *              general, sorted in increasing address order.  Further,
+ *              reads are not, in general, contiguous.
+ *
+ *              1) Open the test file with the specified VFD, set the eoa,
+ *                 and setup the DXPL.
+ *
+ *              2) Using rank zero, write the entire increasing_fi_buf to
+ *                 the file.
+ *
+ *              3) Barrier
+ *
+ *              4) Set all cells of read_fi_buf to zero.
+ *
+ *              5) For each rank, define base_index equal to:
+ *
+ *                     mpi_rank * INTS_PER_RANK
+ *
+ *                 and define base_addr equal to
+ *
+ *                     base_index * sizeof(int32_t).
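+ *
+ *                 (For instance, with INTS_PER_RANK == 1024, rank 2
+ *                 gets base_index == 2048 and base_addr == 8192.)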
+ *
+ *                 Setup a vector read between base_addr and
+ *                 base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ *                 as follows:
+ *
+ *                 if ( rank % 4 == 0 ) construct a vector that reads:
+ *
+ *                     INTS_PER_RANK / 4 * sizeof(int32_t) bytes
+ *                     starting at base_addr + INTS_PER_RANK / 2 *
+ *                     sizeof(int32_t),
+ *
+ *                     INTS_PER_RANK / 8 * sizeof(int32_t) bytes
+ *                     starting at base_addr + INTS_PER_RANK / 4 *
+ *                     sizeof(int32_t), and
+ *
+ *                     INTS_PER_RANK / 16 * sizeof(int32_t) bytes
+ *                     starting at base_addr + INTS_PER_RANK / 16 *
+ *                     sizeof(int32_t)
+ *
+ *                     to the equivalent locations in read_fi_buf
+ *
+ *                 if ( rank % 4 == 1 ) construct a vector that reads:
+ *
+ *                     ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t)
+ *                     bytes starting at base_addr + sizeof(int32_t), and
+ *
+ *                     ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes
+ *                     starting at base_addr + (INTS_PER_RANK / 2 + 1) *
+ *                     sizeof(int32_t)
+ *
+ *                     to the equivalent locations in read_fi_buf
+ *
+ *                 if ( rank % 4 == 2 ) construct a vector that reads:
+ *
+ *                     sizeof(int32_t) bytes starting at base_addr +
+ *                     (INTS_PER_RANK / 2) * sizeof(int32_t)
+ *
+ *                     to the equivalent locations in read_fi_buf
+ *
+ *                 if ( rank % 4 == 3 ) construct and read the empty vector
+ *
+ *              6) On each rank, verify that read_fi_buf contains the
+ *                 expected values -- that is, the matching values from
+ *                 increasing_fi_buf wherever there was a read, and zero
+ *                 otherwise.
+ *
+ *              7) Barrier.
+ *
+ *              8) Close the test file.
+ *
+ *              9) On rank 0, delete the test file.
+ *
+ * Return:      FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer:  John Mainzer
+ *              3/26/21
+ *
+ * Modifications:
+ *
+ *              None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                   H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+    const char *fcn_name = "vector_read_test_4()";
+    char test_title[120];
+    char filename[512];
+    haddr_t eoa;
+    haddr_t base_addr;
+    hbool_t show_progress = FALSE;
+    hid_t fapl_id = -1; /* file access property list ID */
+    hid_t dxpl_id = -1; /* data access property list ID */
+    H5FD_t *lf = NULL;  /* VFD struct ptr */
+    int cp = 0;
+    int i;
+    int j;
+    int k;
+    int base_index;
+    uint32_t count = 0;
+    H5FD_mem_t types[4];
+    haddr_t addrs[4];
+    size_t sizes[4];
+    void *bufs[4];
+
+    pass = TRUE;
+
+    if (mpi_rank == 0) {
+
+        if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / independent",
+                       vfd_name);
+        }
+        else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / ind I/O",
+                       vfd_name);
+        }
+        else {
+
+            HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 4 -- %s / col op / col I/O",
+                       vfd_name);
+        }
+
+        TESTING(test_title);
+    }
+
+    show_progress = ((show_progress) && (mpi_rank == 0));
+
+    if (show_progress)
+        HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+                            &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) Using rank zero, write the entire increasing_fi_buf to
+     * the file.
+     */
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (mpi_rank == 0) {
+
+            if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
+                0) {
+
+                pass = FALSE;
+                failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+            }
+        }
+    }
+
+    /* 3) Barrier */
+    MPI_Barrier(comm);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) Set all cells of read_fi_buf to zero. */
+    if (pass) {
+
+        for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+            read_fi_buf[i] = 0;
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) For each rank, define base_index equal to:
+     *
+     *        mpi_rank * INTS_PER_RANK
+     *
+     *    and define base_addr equal to
+     *
+     *        base_index * sizeof(int32_t).
+     *
+     *    Setup a vector read between base_addr and
+     *    base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+     *    as follows:
+     */
+    if (pass) {
+
+        base_index = mpi_rank * INTS_PER_RANK;
+        base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t);
+
+        if ((mpi_rank % 4) == 0) {
+
+            /* if ( rank % 4 == 0 ) construct a vector that reads:
+             *
+             *     INTS_PER_RANK / 4 * sizeof(int32_t) bytes
+             *     starting at base_addr + INTS_PER_RANK / 2 *
+             *     sizeof(int32_t),
+             *
+             *     INTS_PER_RANK / 8 * sizeof(int32_t) bytes
+             *     starting at base_addr + INTS_PER_RANK / 4 *
+             *     sizeof(int32_t), and
+             *
+             *     INTS_PER_RANK / 16 * sizeof(int32_t) bytes
+             *     starting at base_addr + INTS_PER_RANK / 16 *
+             *     sizeof(int32_t)
+             *
+             *     to the equivalent locations in read_fi_buf
+             */
+
+            count = 3;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+            sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+            bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+
+            types[1] = H5FD_MEM_DRAW;
+            addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) * sizeof(int32_t));
+            sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t);
+            bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 4)]));
+
+            types[2] = H5FD_MEM_DRAW;
+            addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t));
+            sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+            bufs[2] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 16)]));
+        }
+        else if ((mpi_rank % 4) == 1) {
+
+            /* if ( rank % 4 == 1 ) construct a vector that reads:
+             *
+             *     ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t)
+             *     bytes starting at base_addr + sizeof(int32_t), and
+             *
+             *     ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes
+             *     starting at base_addr + (INTS_PER_RANK / 2 + 1) *
+             *     sizeof(int32_t)
+             *
+             *     to the equivalent locations in read_fi_buf
+             */
+            count = 2;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)(sizeof(int32_t));
+            sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+            bufs[0] = (void *)(&(read_fi_buf[base_index + 1]));
+
+            types[1] = H5FD_MEM_DRAW;
+            addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t));
+            sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+            bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2) + 1]));
+        }
+        else if ((mpi_rank % 4) == 2) {
+
+            /* if ( rank % 4 == 2 ) construct a vector that reads:
+             *
+             *     sizeof(int32_t) bytes starting at base_addr +
+             *     (INTS_PER_RANK / 2) * sizeof(int32_t)
+             *
+             *     to the equivalent locations in read_fi_buf
+             */
+            count = 1;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+            sizes[0] = sizeof(int32_t);
+            bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+        }
+        else if ((mpi_rank % 4) == 3) {
+
+            /* if ( rank % 4 == 3 ) construct and read the empty vector */
+
+            count = 0;
+        }
+
+        if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass = FALSE;
+            failure_mssg = "H5FDread_vector() failed (1).\n";
+        }
+    }
+
+    /* 6) On each rank, verify that read_fi_buf contains the
+     *    expected values -- that is, the matching values from
+     *    increasing_fi_buf wherever there was a read, and zero
+     *    otherwise.
+     */
+    if (pass) {
+
+        for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+            base_index = i * INTS_PER_RANK;
+#if 1
+            for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+                k = j - base_index;
+#else
+            for (k = 0; k < INTS_PER_RANK; k++) {
+
+                j = k + base_index;
+#endif
+
+                if (i == mpi_rank) {
+
+                    switch (i % 4) {
+
+                        case 0:
+                            if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (1.1)";
+                                    HDfprintf(stdout, "\nread_fi_buf[%d] = %d, increasing_fi_buf[%d] = %d\n",
+                                              j, read_fi_buf[j], j, increasing_fi_buf[j]);
+                                }
+                            }
+                            else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (1.2)";
+                                }
+                            }
+                            else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (1.3)";
+                                }
+                            }
+                            else {
+
+                                if (read_fi_buf[j] != 0) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (1.4)";
+                                }
+                            }
+                            break;
+
+                        case 1:
+                            if ((1 <= k) && (k <= ((INTS_PER_RANK / 2) - 2))) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (2.1)";
+                                }
+                            }
+                            else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (2.2)";
+                                }
+                            }
+                            else {
+
+                                if (read_fi_buf[j] != 0) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (2.3)";
+                                }
+                            }
+                            break;
+
+                        case 2:
+                            if (k == INTS_PER_RANK / 2) {
+
+                                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (3.1)";
+                                }
+                            }
+                            else {
+
+                                if (read_fi_buf[j] != 0) {
+
+                                    pass = FALSE;
+                                    failure_mssg = "unexpected data read from file (3.2)";
+                                }
+                            }
+                            break;
+
+                        case 3:
+                            if (read_fi_buf[j] != 0) {
+
+                                pass = FALSE;
+                                failure_mssg = "unexpected data read from file (4)";
+                            }
+                            break;
+
+                        default:
+                            HDassert(FALSE); /* should be unreachable */
+                            break;
+                    }
+                }
+                else if (read_fi_buf[j] != 0) {
+
+                    pass = FALSE;
+                    failure_mssg = "unexpected data read from file (5)";
+                }
+            } /* end for loop */
+        }     /* end for loop */
+    }         /* end if */
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 7) Barrier */
+    MPI_Barrier(comm);
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 8) Close the test file and delete it (on rank 0 only).
+     *    Close FAPL and DXPL.
+ */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_4() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_5() + * + * Purpose: Test correct management of the sizes[] array optimization, + * where, if sizes[i] == 0, we use sizes[i - 1] as the value + * of sizes[j], for j >= i. + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) Set all cells of read_fi_buf to zero. + * + * 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * that reads every 16th integer located in + * that range starting at base_addr. Use a sizes[] + * array of length 2, with sizes[0] set to sizeof(int32_t), + * and sizes[1] = 0. + * + * Read the integers into the corresponding locations in + * read_fi_buf. + * + * 6) On each rank, verify that read_fi_buf contains the + * expected values -- that is, the matching values from + * increasing_fi_buf wherever there was a read, and zero + * otherwise. + * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None.
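+ * + * A minimal sketch (not part of the test, and using a + * hypothetical helper name) of how a driver might expand + * the sizes[] shortcut being verified here: + */ +#if 0 /* illustrative sketch only */ +static void +expand_sizes(uint32_t count, const size_t sizes[], size_t out[]) +{ + size_t cur_size = 0; + hbool_t fixed_size = FALSE; + uint32_t i; + + for (i = 0; i < count; i++) { + + /* once a zero is seen, stop reading sizes[] (it may be + * shorter than count) and reuse the last non-zero size + */ + if (!fixed_size) { + if (sizes[i] == 0) + fixed_size = TRUE; + else + cur_size = sizes[i]; + } + out[i] = cur_size; + } +} +#endif +/*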
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_5()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + haddr_t base_addr; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int base_index; + uint32_t count = 0; + H5FD_mem_t types[(INTS_PER_RANK / 16) + 1]; + haddr_t addrs[(INTS_PER_RANK / 16) + 1]; + size_t sizes[2]; + void *bufs[(INTS_PER_RANK / 16) + 1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / ind I/O", + vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), "parallel vector read test 5 -- %s / col op / col I/O", + vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Set all cells of read_fi_buf to zero. */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * that reads every 16th integer located in + * that range starting at base_addr. Use a sizes[] + * array of length 2, with sizes[0] set to sizeof(int32_t), + * and sizes[1] = 0. + * + * Read the integers into the corresponding locations in + * read_fi_buf.
+ */ + if (pass) { + + base_index = (mpi_rank * INTS_PER_RANK); + base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t); + + count = INTS_PER_RANK / 16; + sizes[0] = sizeof(int32_t); + sizes[1] = 0; + + for (i = 0; i < INTS_PER_RANK / 16; i++) { + + types[i] = H5FD_MEM_DRAW; + addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t)); + bufs[i] = (void *)(&(read_fi_buf[base_index + (i * 16)])); + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, verify that read_fi_buf contains the + * expected values -- that is, the matching values from + * increasing_fi_buf wherever there was a read, and zero + * otherwise. + */ + if (pass) { + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + if ((i == mpi_rank) && (j % 16 == 0)) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + } + } + else if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + } + } /* end for loop */ + } /* end for loop */ + } /* end if */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_5() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_1() + * + * Purpose: Simple vector write test: + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Write the entire increasing_fi_buf to the file, with + * exactly one buffer per vector per rank. Use either + * independent or collective I/O as specified. + * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf. Report failure + * if any differences are detected. + * + * 5) Close the test file. + * + * 6) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None.
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_1()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + const void *bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 1 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 1 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Write the entire increasing_fi_buf to the file, with + * exactly one buffer per vector per rank. Use either + * independent or collective I/O as specified. + */ + + if (pass) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (const void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf. Report failure + * if any differences are detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_1() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_2() + * + * Purpose: Test vector I/O writes in which only some ranks participate. + * Depending on the collective parameter, these writes will + * be either collective or independent. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) Write the odd blocks of the increasing_fi_buf to the file, + * with the odd ranks writing the odd blocks, and the even + * ranks writing an empty vector. + * + * Here, a "block" of the increasing_fi_buf is a sequence + * of integers in increasing_fi_buf of length INTS_PER_RANK, + * and with start index a multiple of INTS_PER_RANK. + * + * 3) Write the even blocks of the negative_fi_buf to the file, + * with the even ranks writing the even blocks, and the odd + * ranks writing an empty vector. + * + * 4) Barrier + * + * 5) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf and negative_fi_buf + * as appropriate. Report failure if any differences are + * detected. + * + * 6) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/28/21 + * + * Modifications: + * + * None.
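+ * + * Note that when the transfer is collective, every rank + * must make the vector write call even if it has nothing + * to write, which is why the non-participating ranks below + * issue the empty vector write: + * + * H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL);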
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_2()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + const void *bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 2 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 2 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Write the odd blocks of the increasing_fi_buf to the file, + * with the odd ranks writing the odd blocks, and the even + * ranks writing an empty vector. + * + * Here, a "block" of the increasing_fi_buf is a sequence + * of integers in increasing_fi_buf of length INTS_PER_RANK, + * and with start index a multiple of INTS_PER_RANK. + */ + if (pass) { + + if (mpi_rank % 2 == 1) { /* odd ranks */ + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (const void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + else { /* even ranks */ + + if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (2).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Write the even blocks of the negative_fi_buf to the file, + * with the even ranks writing the even blocks, and the odd + * ranks writing an empty vector. 
+ */ + if (pass) { + + if (mpi_rank % 2 == 1) { /* odd ranks */ + + if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (3).\n"; + } + } + else { /* even ranks */ + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (const void *)(&(negative_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (4).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf and negative_fi_buf + * as appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + if (i % 2 == 1) { /* odd block */ + + for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + else { /* even block */ + + for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_2() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_3() + * + * Purpose: Test vector I/O writes with vectors of multiple entries. + * For now, keep the vectors sorted in increasing address + * order. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf. + * + * Write to file. + * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 5) Close the test file. On rank 0, delete the test file.
+ * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/31/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_3()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + int ints_per_write; + size_t bytes_per_write; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + const void *bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 3 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 3 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf. + * + * Write to file. 
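+ * + * The resulting per-rank layout in the file (in int32_t + * entries relative to base_index, writing Q for + * INTS_PER_RANK / 4) is: + * + * [0, Q) from increasing_fi_buf, + * [Q, 2Q) from decreasing_fi_buf, + * [2Q, 3Q) from negative_fi_buf, and + * [3Q, 4Q) from zero_fi_buf.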
+ */ + if (pass) { + + count = 4; + + base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + ints_per_write = INTS_PER_RANK / 4; + bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t); + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr; + sizes[0] = bytes_per_write; + bufs[0] = (const void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = addrs[0] + (haddr_t)(bytes_per_write); + sizes[1] = bytes_per_write; + bufs[1] = (const void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = addrs[1] + (haddr_t)(bytes_per_write); + sizes[2] = bytes_per_write; + bufs[2] = (const void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)])); + + types[3] = H5FD_MEM_DRAW; + addrs[3] = addrs[2] + (haddr_t)(bytes_per_write); + sizes[3] = bytes_per_write; + bufs[3] = (const void *)(&(zero_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != zero_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (4)"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL.
+ */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_3() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_4() + * + * Purpose: Test vector I/O writes with vectors of multiple entries. + * For now, keep the vectors sorted in increasing address + * order. + * + * This test differs from vector_write_test_3() in the order + * in which the file image buffers appear in the vector + * write. This guarantees that at least one of these + * tests will present buffers with non-increasing addresses + * in RAM. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf. + * + * Write to file. + * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 5) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/31/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_4()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + int ints_per_write; + size_t bytes_per_write; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + const void *bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 4 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf. + * + * Write to file.
+ */ + if (pass) { + + count = 4; + + base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + ints_per_write = INTS_PER_RANK / 4; + bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t); + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr; + sizes[0] = bytes_per_write; + bufs[0] = (const void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = addrs[0] + (haddr_t)(bytes_per_write); + sizes[1] = bytes_per_write; + bufs[1] = (const void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = addrs[1] + (haddr_t)(bytes_per_write); + sizes[2] = bytes_per_write; + bufs[2] = (const void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)])); + + types[3] = H5FD_MEM_DRAW; + addrs[3] = addrs[2] + (haddr_t)(bytes_per_write); + sizes[3] = bytes_per_write; + bufs[3] = + (const void *)(&(increasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != zero_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (4)"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL.
+ */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_4() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_5() + * + * Purpose: Test vector I/O writes with vectors of different lengths + * and entry sizes across the ranks. Vectors are not, in + * general, sorted in increasing address order. Further, + * writes are not, in general, contiguous. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) Set the test file in a known state by writing zeros + * to all bytes in the test file. Since we have already + * tested this, do this via a vector write of zero_fi_buf. + * + * 3) Barrier + * + * 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector write between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * as follows: + * + * if ( rank % 4 == 0 ) construct a vector that writes: + * + * negative_fi_buf starting at base_index + + * INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4 + * entries, + * + * decreasing_fi_buf starting at base_index + + * INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8 + * entries, and + * + * increasing_fi_buf starting at base_index + + * INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16 + * entries + * + * to the equivalent locations in the file. + * + * if ( rank % 4 == 1 ) construct a vector that writes: + * + * increasing_fi_buf starting at base_index + 1 and + * running for (INTS_PER_RANK / 2) - 2 entries, and + * + * decreasing_fi_buf starting at base_index + + * INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2) + * - 2 entries + * + * if ( rank % 4 == 2 ) construct a vector that writes: + * + * negative_fi_buf starting at base_index + + * INTS_PER_RANK / 2 and running for one entry. + * + * if ( rank % 4 == 3 ) construct and write the empty vector + * + * 5) Barrier + * + * 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 7) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/31/21 + * + * Modifications: + * + * None.
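+ * + * For example, if INTS_PER_RANK were 1024, a rank with + * rank % 4 == 0 would write negative_fi_buf entries to + * offsets [512, 768), decreasing_fi_buf entries to + * [256, 384), and increasing_fi_buf entries to [64, 128) + * (all in int32_t units relative to base_index), leaving + * the rest of its section of the file zero.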
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_5()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int k; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + const void *bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 5 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 5 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Set the test file in a known state by writing zeros + * to all bytes in the test file. Since we have already + * tested this, do this via a vector write of zero_fi_buf. + */ + if (pass) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (const void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). 
+ * + * Setup a vector write between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * as follows: + */ + if (pass) { + + base_index = mpi_rank * INTS_PER_RANK; + base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t)); + + if ((mpi_rank % 4) == 0) { + + /* if ( rank % 4 == 0 ) construct a vector that writes: + * + * negative_fi_buf starting at base_index + + * INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4 + * entries, + * + * decreasing_fi_buf starting at base_index + + * INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8 + * entries, and + * + * increasing_fi_buf starting at base_index + + * INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16 + * entries + * + * to the equivalent locations in the file. + */ + count = 3; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t)); + sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t); + bufs[0] = (const void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) * sizeof(int32_t)); + sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t); + bufs[1] = (const void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t)); + sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t); + bufs[2] = (const void *)(&(increasing_fi_buf[base_index + (INTS_PER_RANK / 16)])); + } + else if ((mpi_rank % 4) == 1) { + + /* if ( rank % 4 == 1 ) construct a vector that writes: + * + * increasing_fi_buf starting at base_index + 1 and + * running for (INTS_PER_RANK / 2) - 2 entries, and + * + * decreasing_fi_buf starting at base_index + + * INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2) + * - 2 entries + * + * to the equivalent locations in the file. + */ + count = 2; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t); + bufs[0] = (const void *)(&(increasing_fi_buf[base_index + 1])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t)); + sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t); + bufs[1] = (const void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 2) + 1])); + } + else if ((mpi_rank % 4) == 2) { + + /* if ( rank % 4 == 2 ) construct a vector that writes: + * + * negative_fi_buf starting at base_index + + * INTS_PER_RANK / 2 and running for one entry. + * + * to the equivalent location in the file.
+ */ + count = 1; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t)); + sizes[0] = sizeof(int32_t); + bufs[0] = (const void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)])); + } + else if ((mpi_rank % 4) == 3) { + + /* if ( rank % 4 == 3 ) construct and write the empty vector */ + + count = 0; + } + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + k = j - base_index; + + switch (i % 4) { + + case 0: + if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.1)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + negative_fi_buf[j]); + } + } + else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.2)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + decreasing_fi_buf[j]); + } + } + else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.3)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + increasing_fi_buf[j]); + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.4)"; + } + } + break; + + case 1: + if ((1 <= k) && (k <= ((INTS_PER_RANK / 2) - 2))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.1)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + increasing_fi_buf[j]); + } + } + else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.2)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + decreasing_fi_buf[j]); + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.3)"; + } + } + break; + + case 2: + if (k == INTS_PER_RANK / 2) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3.1)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + negative_fi_buf[j]); + } + } + else { + + if 
(read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3.2)"; + } + } + break; + + case 3: + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (4)"; + } + break; + + default: + HDassert(FALSE); /* should be unreachable */ + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_5() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_6() + * + * Purpose: Test correct management of the sizes[] array optimization, + * where, if sizes[i] == 0, we use sizes[i - 1] as the value + * of sizes[j], for j >= i. + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire zero_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector write from increasing_fi_buf between + * base_addr and base_addr + INTS_PER_RANK * + * sizeof(int32_t) - 1 that writes every 16th integer + * located in that range starting at base_addr. + * Use a sizes[] array of length 2, with sizes[0] set + * to sizeof(int32_t), and sizes[1] = 0. + * + * Write the integers into the corresponding locations in + * the file. + * + * 5) Barrier + * + * 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, and increasing_fi_buf + * as appropriate. Report failure if any differences are + * detected. + * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None.
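+ * + * Concretely (with 4 byte int32_t), each rank issues + * INTS_PER_RANK / 16 entries whose addresses step by + * 16 * sizeof(int32_t) = 64 bytes, while sizes[] holds + * just { sizeof(int32_t), 0 } -- the trailing zero tells + * the driver to keep using sizeof(int32_t) for all + * remaining entries.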
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_6()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + haddr_t base_addr; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int base_index; + uint32_t count = 0; + H5FD_mem_t types[(INTS_PER_RANK / 16) + 1]; + haddr_t addrs[(INTS_PER_RANK / 16) + 1]; + size_t sizes[2]; + const void *bufs[(INTS_PER_RANK / 16) + 1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 6 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 6 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire zero_fi_buf to + * the file. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)zero_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector write from increasing_fi_buf between + * base_addr and base_addr + INTS_PER_RANK * + * sizeof(int32_t) - 1 that writes every 16th integer + * located in that range starting at base_addr. + * Use a sizes[] array of length 2, with sizes[0] set + * to sizeof(int32_t), and sizes[1] = 0. + * + * Write the integers into the corresponding locations in + * the file.
+ */ + if (pass) { + + base_index = (mpi_rank * INTS_PER_RANK); + base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t); + + count = INTS_PER_RANK / 16; + sizes[0] = sizeof(int32_t); + sizes[1] = 0; + + for (i = 0; i < INTS_PER_RANK / 16; i++) { + + types[i] = H5FD_MEM_DRAW; + addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t)); + bufs[i] = (const void *)(&(increasing_fi_buf[base_index + (i * 16)])); + } + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, and increasing_fi_buf + * as appropriate. Report failure if any differences are + * detected. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size * INTS_PER_RANK)); i++) { + + if (i % 16 == 0) { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + } + } + else if (read_fi_buf[i] != zero_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + } + } + } /* end if */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_6() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_7() + * + * Purpose: Test vector I/O with larger vectors -- 8 elements in each + * vector for now. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) Set the test file in a known state by writing zeros + * to all bytes in the test file. Since we have already + * tested this, do this via a vector write of zero_fi_buf. + * + * 3) Barrier + * + * 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector of length 8, with each element of + * length INTS_PER_RANK / 16, and base address + * base_addr + i * (INTS_PER_RANK / 8), where i is + * the index of the entry (starting at zero). Draw + * written data from the equivalent locations in + * increasing_fi_buf. + * + * Write the vector. 
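+ * + * With 8 entries of INTS_PER_RANK / 16 integers spaced + * INTS_PER_RANK / 8 apart, each rank's section of the + * file alternates between a written stripe and an + * untouched (zero) stripe of equal length -- which is + * what the check in 6) below verifies via the predicate + * (k % (INTS_PER_RANK / 8)) < (INTS_PER_RANK / 16).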
+ * + * 5) Barrier + * + * 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, and increasing_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 7) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 10/10/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_7(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_7()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + haddr_t addr_increment; + int base_index; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t *lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int k; + uint32_t count; + H5FD_mem_t types[8]; + haddr_t addrs[8]; + size_t sizes[8]; + const void *bufs[8]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 7 -- %s / independent", + vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 7 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + HDsnprintf(test_title, sizeof(test_title), + "parallel vector write test 7 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Set the test file in a known state by writing zeros + * to all bytes in the test file. Since we have already + * tested this, do this via a vector write of zero_fi_buf.
+ */ + if (pass) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (const void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Setup the 8 entry vector of writes from + * increasing_fi_buf described in the header comment, + * and write it to the file. + */ + if (pass) { + + base_index = mpi_rank * INTS_PER_RANK; + base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t)); + addr_increment = (haddr_t)((INTS_PER_RANK / 8) * sizeof(int32_t)); + + count = 8; + + for (i = 0; i < (int)count; i++) { + + types[i] = H5FD_MEM_DRAW; + addrs[i] = base_addr + ((haddr_t)(i)*addr_increment); + sizes[i] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t); + bufs[i] = (const void *)(&(increasing_fi_buf[base_index + (i * (INTS_PER_RANK / 8))])); + } + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + MPI_Barrier(comm); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + k = j - base_index; + + if ((k % (INTS_PER_RANK / 8)) < (INTS_PER_RANK / 16)) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + + HDprintf("\nread_fi_buf[%d] = %d, %d expected.\n", j, read_fi_buf[j], + increasing_fi_buf[j]); + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + + HDprintf("\nread_fi_buf[%d] = %d, 0 expected.\n", j, read_fi_buf[j]); + } + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if ((disp_failure_mssgs) || (show_progress)) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_7() */ + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Run parallel VFD tests.
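+ * + * Each read and write test below is run three times + * against the MPIO VFD: independent transfer, collective + * transfer with individual I/O, and collective transfer + * with collective I/O.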
+/*-------------------------------------------------------------------------
+ * Function:    main
+ *
+ * Purpose:     Run parallel VFD tests.
+ *
+ * Return:      Success: 0
+ *
+ *              Failure: 1
+ *
+ * Programmer:  John Mainzer
+ *              3/26/21
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+    unsigned nerrs = 0;
+#ifdef H5_HAVE_SUBFILING_VFD
+    int required = MPI_THREAD_MULTIPLE;
+    int provided = 0;
+#endif
+    int mpi_size;
+    int mpi_rank;
+
+#ifdef H5_HAVE_SUBFILING_VFD
+    if (MPI_SUCCESS != MPI_Init_thread(&argc, &argv, required, &provided)) {
+        HDprintf("    MPI_Init_thread failed. Exiting\n");
+        goto finish;
+    }
+
+    if (provided != required) {
+        HDprintf("    MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE. Exiting\n");
+        goto finish;
+    }
+#else
+    if (MPI_SUCCESS != MPI_Init(&argc, &argv)) {
+        HDprintf("    MPI_Init failed. Exiting\n");
+        goto finish;
+    }
+#endif
+
+    MPI_Comm_size(comm, &mpi_size);
+    MPI_Comm_rank(comm, &mpi_rank);
+
+    /* Attempt to turn off atexit post processing so that in case errors
+     * occur during the test and the process is aborted, it will not hang
+     * in the atexit post processing.  If it does, it may try to make MPI
+     * calls which may not work.
+     */
+    if (H5dont_atexit() < 0)
+        HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
+
+    H5open();
+
+    if (mpi_rank == 0) {
+        HDprintf("=========================================\n");
+        HDprintf("Parallel virtual file driver (VFD) tests\n");
+        HDprintf("        mpi_size = %d\n", mpi_size);
+        HDprintf("=========================================\n");
+    }
+
+    if (mpi_size < 2) {
+        if (mpi_rank == 0)
+            HDprintf("    Need at least 2 processes.  Exiting.\n");
+        goto finish;
+    }
+
+    alloc_and_init_file_images(mpi_size);
+
+    if (!pass) {
+
+        HDprintf("\nAllocation and initialization of file image buffers failed.
Test aborted.\n"); + } + + MPI_Barrier(comm); + + if (mpi_rank == 0) { + + HDprintf("\n\n --- TESTING MPIO VFD --- \n\n"); + } + + nerrs += + vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, 
H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_7(6, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + MPI_Barrier(comm); + +#ifdef H5_HAVE_SUBFILING_VFD + if (mpi_rank == 0) { + + HDprintf("\n\n --- TESTING SUBFILING VFD --- \n\n"); + } + + nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_read_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_1(7, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_2(8, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += 
vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_3(9, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_4(10, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_5(11, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_6(12, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); + + nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, + H5FD_SUBFILING_NAME); + nerrs += vector_write_test_7(13, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, + H5FD_SUBFILING_NAME); +#endif + +finish: + /* make sure all processes are finished before final report, cleanup + * and exit. + */ + MPI_Barrier(comm); + + if (mpi_rank == 0) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrs > 0) + HDprintf("***vfd tests detected %d failures***\n", nerrs); + else + HDprintf("vfd tests finished with no failures\n"); + HDprintf("===================================\n"); + } + + /* discard the file image buffers */ + free_file_images(); + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrs) because exit code is limited to 1byte */ + return (nerrs > 0); + +} /* main() */ diff --git a/testpar/testpar.h b/testpar/testpar.h index 2c99103..6c380a9 100644 --- a/testpar/testpar.h +++ b/testpar/testpar.h @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. 
* + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* common definitions used by all parallel test programs. */ @@ -24,7 +21,7 @@ #include "h5test.h" /* Constants definitions */ -#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ +#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */ /* Define some handy debugging shorthands, routines, ... */ /* debugging tools */ @@ -32,11 +29,11 @@ /* Print message mesg if verbose level is at least medium and * mesg is not an empty string. */ -#define MESG(mesg) \ - if (VERBOSE_MED && *mesg != '\0') \ - printf("%s\n", mesg) +#define MESG(mesg) \ + if (VERBOSE_MED && *mesg != '\0') \ + HDprintf("%s\n", mesg) -/* +/* * VRFY: Verify if the condition val is true. * If it is true, then call MESG to print mesg, depending on the verbose * level. @@ -46,56 +43,63 @@ * This will allow program to continue and can be used for debugging. * (The "do {...} while(0)" is to group all the statements as one unit.) */ -#define VRFY(val, mesg) do { \ - if (val) { \ - MESG(mesg); \ - } else { \ - printf("Proc %d: ", mpi_rank); \ - printf("*** Parallel ERROR ***\n"); \ - printf(" VRFY (%s) failed at line %4d in %s\n", \ - mesg, (int)__LINE__, __FILE__); \ - ++nerrors; \ - fflush(stdout); \ - if (!VERBOSE_MED) { \ - printf("aborting MPI processes\n"); \ - MPI_Abort(MPI_COMM_WORLD, 1); \ - } \ - } \ -} while(0) +#define VRFY_IMPL(val, mesg, rankvar) \ + do { \ + if (val) { \ + MESG(mesg); \ + } \ + else { \ + HDprintf("Proc %d: ", rankvar); \ + HDprintf("*** Parallel ERROR ***\n"); \ + HDprintf(" VRFY (%s) failed at line %4d in %s\n", mesg, (int)__LINE__, __FILE__); \ + ++nerrors; \ + fflush(stdout); \ + if (!VERBOSE_MED) { \ + HDprintf("aborting MPI processes\n"); \ + MPI_Abort(MPI_COMM_WORLD, 1); \ + } \ + } \ + } while (0) + +#define VRFY_G(val, mesg) VRFY_IMPL(val, mesg, mpi_rank_g) +#define VRFY(val, mesg) VRFY_IMPL(val, mesg, mpi_rank) /* * Checking for information purpose. * If val is false, print mesg; else nothing. * Either case, no error setting. 
*/ -#define INFO(val, mesg) do { \ - if (val) { \ - MESG(mesg); \ - } else { \ - printf("Proc %d: ", mpi_rank); \ - printf("*** PHDF5 REMARK (not an error) ***\n"); \ - printf(" Condition (%s) failed at line %4d in %s\n", \ - mesg, (int)__LINE__, __FILE__); \ - fflush(stdout); \ - } \ -} while(0) +#define INFO(val, mesg) \ + do { \ + if (val) { \ + MESG(mesg); \ + } \ + else { \ + HDprintf("Proc %d: ", mpi_rank); \ + HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \ + HDprintf(" Condition (%s) failed at line %4d in %s\n", mesg, (int)__LINE__, __FILE__); \ + fflush(stdout); \ + } \ + } while (0) -#define MPI_BANNER(mesg) do { \ - if (VERBOSE_MED || MAINPROCESS){ \ - printf("--------------------------------\n"); \ - printf("Proc %d: ", mpi_rank); \ - printf("*** %s\n", mesg); \ - printf("--------------------------------\n"); \ - } \ -} while(0) +#define MPI_BANNER(mesg) \ + do { \ + if (VERBOSE_MED || MAINPROCESS) { \ + HDprintf("--------------------------------\n"); \ + HDprintf("Proc %d: ", mpi_rank); \ + HDprintf("*** %s\n", mesg); \ + HDprintf("--------------------------------\n"); \ + } \ + } while (0) -#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */ +#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */ -#define SYNC(comm) do { \ - MPI_BANNER("doing a SYNC"); \ - MPI_Barrier(comm); \ - MPI_BANNER("SYNC DONE"); \ -} while(0) +#define SYNC(comm) \ + do { \ + MPI_BANNER("doing a SYNC"); \ + MPI_Barrier(comm); \ + MPI_BANNER("SYNC DONE"); \ + } while (0) /* End of Define some handy debugging shorthands, routines, ... */ diff --git a/testpar/testpflush.sh.in b/testpar/testpflush.sh.in new file mode 100644 index 0000000..4720d6c --- /dev/null +++ b/testpar/testpflush.sh.in @@ -0,0 +1,63 @@ +#! /bin/sh +# +# Copyright by The HDF Group. +# All rights reserved. +# +# This file is part of HDF5. The full HDF5 copyright notice, including +# terms governing use, modification, and redistribution, is contained in +# the COPYING file, which can be found at the root of the source code +# distribution tree, or in https://www.hdfgroup.org/licenses. +# If you do not have access to either file, you may request a copy from +# help@hdfgroup.org. +# +# +# Test script for the parallel flush test +# +# The parallel flush test uses two programs to test flush operations +# in parallel HDF5. The first program purposely exits without calling +# MPI_Finalize(), which is an error under the MPI standard and mpiexec +# in some implementations will return an error code even though all +# processes exit successfully. This script lets us swallow the error +# from the first program. +# +# True errors in the first program will be detected as errors in the +# second program, so watch out for that. +# +# Programmer: Dana Robinson +# Fall 2018 + +# The build (current) directory might be different than the source directory. +if test -z "$srcdir"; then + srcdir=. +fi + +# Turn the $$ we use to avoid Autotools munging into $ +# +# Allowing $$ to substitute in both the RUNPARALLEL string and the +# regexp is intentional. There doesn't seem to be a way around +# this using quote shenanigans. The downside is that there is a remote +# chance that the shell's pid will match a number in the RUNPARALLEL +# variable, but that seems less likely to cause problems than expecting +# library builders to specify two almost identical versions of the +# RUNPARALLEL command, one for use in scripts and one via Makefiles. 
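To make the $$ trick concrete, here is a worked example with hypothetical values (not taken from any real configure output): suppose @RUNPARALLEL@ is substituted as "mpiexec -n $${NPROCS:=6}" and this shell's pid happens to be 1234. Both occurrences of $$ then expand to 1234, so the line below effectively runs

    echo "mpiexec -n 1234{NPROCS:=6}" | sed "s/1234/$/g"
    # output: mpiexec -n ${NPROCS:=6}

restoring the single $ that the eval commands further down rely on.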
+RUNPARALLELSCRIPT=`echo "@RUNPARALLEL@" | sed "s/$$/\$/g"` + +# ========================================== +# Run the first parallel flush test program +# (note that we ignore any errors here) +# ========================================== +echo "*** NOTE ***********************************************************" +echo "You may see complaints from mpiexec et al. that not all processes" +echo "called MPI_Finalize(). This is an intended characteristic of the" +echo "test and should not be considered an error." +echo "********************************************************************" +eval ${RUNPARALLELSCRIPT} ./t_pflush1 + + +# =========================================== +# Run the second parallel flush test program +# The return code of this call is the return +# code of the script. +# =========================================== +eval ${RUNPARALLELSCRIPT} ./t_pflush2 + diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c index c3da73f..cc32dee 100644 --- a/testpar/testphdf5.c +++ b/testpar/testphdf5.c @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* @@ -20,24 +17,21 @@ #include "testphdf5.h" #ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ +#define PATH_MAX 512 +#endif /* !PATH_MAX */ /* global variables */ int dim0; int dim1; int chunkdim0; int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ +int nerrors = 0; /* errors count */ +int ndatasets = 300; /* number of datasets to create*/ +int ngroups = 512; /* number of groups to create in root + * group. */ +int facc_type = FACC_MPIO; /*Test file access type */ int dxfer_coll_type = DXFER_COLLECTIVE_IO; -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ - /* other option flags */ /* FILENAME and filenames must have the same number of names. @@ -45,13 +39,11 @@ void *old_client_data; /* previous error handler arg.*/ * created in one test is accessed by a different test. * filenames[0] is reserved as the file name for PARATESTFILE. 
*/ -#define NFILENAME 2 +#define NFILENAME 2 #define PARATESTFILE filenames[0] -const char *FILENAME[NFILENAME]={ - "ParaTest", - NULL}; -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ +const char *FILENAME[NFILENAME] = {"ParaTest", NULL}; +char *filenames[NFILENAME]; +hid_t fapl; /* file access property list */ #ifdef USE_PAUSE /* pause the process for a moment to allow debugger to attach if desired. */ @@ -60,15 +52,16 @@ hid_t fapl; /* file access property list */ #include <sys/types.h> #include <sys/stat.h> -void pause_proc(void) +void +pause_proc(void) { - int pid; - h5_stat_t statbuf; - char greenlight[] = "go"; - int maxloop = 10; - int loops = 0; - int time_int = 10; + int pid; + h5_stat_t statbuf; + char greenlight[] = "go"; + int maxloop = 10; + int loops = 0; + int time_int = 10; /* mpi variables */ int mpi_size, mpi_rank; @@ -81,28 +74,28 @@ void pause_proc(void) MPI_Get_processor_name(mpi_name, &mpi_namelen); if (MAINPROCESS) - while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){ - if (!loops++){ - printf("Proc %d (%*s, %d): to debug, attach %d\n", - mpi_rank, mpi_namelen, mpi_name, pid, pid); - } - printf("waiting(%ds) for file %s ...\n", time_int, greenlight); - fflush(stdout); - sleep(time_int); - } + while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) { + if (!loops++) { + HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid, + pid); + } + HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight); + HDfflush(stdout); + HDsleep(time_int); + } MPI_Barrier(MPI_COMM_WORLD); } /* Use the Profile feature of MPI to call the pause_proc() */ -int MPI_Init(int *argc, char ***argv) +int +MPI_Init(int *argc, char ***argv) { int ret_code; - ret_code=PMPI_Init(argc, argv); + ret_code = PMPI_Init(argc, argv); pause_proc(); return (ret_code); } -#endif /* USE_PAUSE */ - +#endif /* USE_PAUSE */ /* * Show command usage @@ -110,198 +103,203 @@ int MPI_Init(int *argc, char ***argv) static void usage(void) { - printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " - "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); - printf("\t-m<n_datasets>" - "\tset number of datasets for the multiple dataset test\n"); - printf("\t-n<n_groups>" - "\tset number of groups for the multiple group test\n"); - printf("\t-f <prefix>\tfilename prefix\n"); - printf("\t-2\t\tuse Split-file together with MPIO\n"); - printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", - ROW_FACTOR, COL_FACTOR); - printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - printf("\n"); + HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] " + "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n"); + HDprintf("\t-m<n_datasets>" + "\tset number of datasets for the multiple dataset test\n"); + HDprintf("\t-n<n_groups>" + "\tset number of groups for the multiple group test\n"); + HDprintf("\t-f <prefix>\tfilename prefix\n"); + HDprintf("\t-2\t\tuse Split-file together with MPIO\n"); + HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR, + COL_FACTOR); + HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); + HDprintf("\n"); } - /* * parse the command line options */ static int parse_options(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables */ + int mpi_size, mpi_rank; /* mpi variables */ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* setup default chunk-size. 
Make sure sizes are > 0 */ - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - - while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'm': ndatasets = atoi((*argv+1)+1); - if (ndatasets < 0){ - nerrors++; - return(1); - } - break; - case 'n': ngroups = atoi((*argv+1)+1); - if (ngroups < 0){ - nerrors++; - return(1); - } - break; - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; - case 'i': /* Collective MPI-IO access with independent IO */ - dxfer_coll_type = DXFER_INDEPENDENT_IO; - break; - case '2': /* Use the split-file driver with MPIO access */ - /* Can use $HDF5_METAPREFIX to define the */ - /* meta-file-prefix. */ - facc_type = FACC_MPIO | FACC_SPLIT; - break; - case 'd': /* dimensizes */ - if (--argc < 2){ - nerrors++; - return(1); - } - dim0 = atoi(*(++argv))*mpi_size; - argc--; - dim1 = atoi(*(++argv))*mpi_size; - /* set default chunkdim sizes too */ - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - break; - case 'c': /* chunk dimensions */ - if (--argc < 2){ - nerrors++; - return(1); - } - chunkdim0 = atoi(*(++argv)); - argc--; - chunkdim1 = atoi(*(++argv)); - break; - case 'h': /* print help message--return with nerrors set */ - return(1); - default: printf("Illegal option(%s)\n", *argv); - nerrors++; - return(1); - } - } + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + + while (--argc) { + if (**(++argv) != '-') { + break; + } + else { + switch (*(*argv + 1)) { + case 'm': + ndatasets = atoi((*argv + 1) + 1); + if (ndatasets < 0) { + nerrors++; + return (1); + } + break; + case 'n': + ngroups = atoi((*argv + 1) + 1); + if (ngroups < 0) { + nerrors++; + return (1); + } + break; + case 'f': + if (--argc < 1) { + nerrors++; + return (1); + } + if (**(++argv) == '-') { + nerrors++; + return (1); + } + paraprefix = *argv; + break; + case 'i': /* Collective MPI-IO access with independent IO */ + dxfer_coll_type = DXFER_INDEPENDENT_IO; + break; + case '2': /* Use the split-file driver with MPIO access */ + /* Can use $HDF5_METAPREFIX to define the */ + /* meta-file-prefix. 
*/ + facc_type = FACC_MPIO | FACC_SPLIT; + break; + case 'd': /* dimensizes */ + if (--argc < 2) { + nerrors++; + return (1); + } + dim0 = atoi(*(++argv)) * mpi_size; + argc--; + dim1 = atoi(*(++argv)) * mpi_size; + /* set default chunkdim sizes too */ + chunkdim0 = (dim0 + 9) / 10; + chunkdim1 = (dim1 + 9) / 10; + break; + case 'c': /* chunk dimensions */ + if (--argc < 2) { + nerrors++; + return (1); + } + chunkdim0 = atoi(*(++argv)); + argc--; + chunkdim1 = atoi(*(++argv)); + break; + case 'h': /* print help message--return with nerrors set */ + return (1); + default: + HDprintf("Illegal option(%s)\n", *argv); + nerrors++; + return (1); + } + } } /*while*/ /* check validity of dimension and chunk sizes */ - if (dim0 <= 0 || dim1 <= 0){ - printf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return(1); + if (dim0 <= 0 || dim1 <= 0) { + HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); + nerrors++; + return (1); } - if (chunkdim0 <= 0 || chunkdim1 <= 0){ - printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return(1); + if (chunkdim0 <= 0 || chunkdim1 <= 0) { + HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); + nerrors++; + return (1); } /* Make sure datasets can be divided into equal portions by the processes */ - if ((dim0 % mpi_size) || (dim1 % mpi_size)){ - if (MAINPROCESS) - printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", - dim0, dim1, mpi_size); - nerrors++; - return(1); + if ((dim0 % mpi_size) || (dim1 % mpi_size)) { + if (MAINPROCESS) + HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size); + nerrors++; + return (1); } /* compose the test filenames */ { - int i, n; - - n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i=0; i < n; i++) - if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) - == NULL){ - printf("h5_fixname failed\n"); - nerrors++; - return(1); - } - printf("Test filenames are:\n"); - for (i=0; i < n; i++) - printf(" %s\n", filenames[i]); + int i, n; + + n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */ + + for (i = 0; i < n; i++) + if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) { + HDprintf("h5_fixname failed\n"); + nerrors++; + return (1); + } + HDprintf("Test filenames are:\n"); + for (i = 0; i < n; i++) + HDprintf(" %s\n", filenames[i]); } - return(0); + return (0); } - /* * Create the appropriate File access property list */ hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) { - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ + hid_t ret_pl = -1; + herr_t ret; /* generic return value */ + int mpi_rank; /* mpi variables */ /* need the rank for error checking macros */ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - ret_pl = H5Pcreate (H5P_FILE_ACCESS); + ret_pl = H5Pcreate(H5P_FILE_ACCESS); VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO){ - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - return(ret_pl); + return (ret_pl); + + if (l_facc_type == FACC_MPIO) { + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(ret_pl, comm, info); + VRFY((ret >= 0), ""); + ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); + VRFY((ret >= 0), ""); + ret = H5Pset_coll_metadata_write(ret_pl, TRUE); + VRFY((ret >= 0), ""); + return (ret_pl); } - if 
(l_facc_type == (FACC_MPIO | FACC_SPLIT)){ - hid_t mpio_pl; - - mpio_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return(ret_pl); + if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) { + hid_t mpio_pl; + + mpio_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((mpio_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_mpio(mpio_pl, comm, info); + VRFY((ret >= 0), ""); + + /* setup file access template */ + ret_pl = H5Pcreate(H5P_FILE_ACCESS); + VRFY((ret_pl >= 0), ""); + /* set Parallel access with communicator */ + ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); + VRFY((ret >= 0), "H5Pset_fapl_split succeeded"); + H5Pclose(mpio_pl); + return (ret_pl); } /* unknown file access types */ return (ret_pl); } - -int main(int argc, char **argv) +int +main(int argc, char **argv) { - int mpi_size, mpi_rank; /* mpi variables */ + int mpi_size, mpi_rank; /* mpi variables */ H5Ptest_param_t ndsets_params, ngroups_params; H5Ptest_param_t collngroups_params; H5Ptest_param_t io_mode_confusion_params; @@ -317,13 +315,13 @@ int main(int argc, char **argv) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - dim0 = ROW_FACTOR*mpi_size; - dim1 = COL_FACTOR*mpi_size; + dim0 = ROW_FACTOR * mpi_size; + dim1 = COL_FACTOR * mpi_size; - if (MAINPROCESS){ - printf("===================================\n"); - printf("PHDF5 TESTS START\n"); - printf("===================================\n"); + if (MAINPROCESS) { + HDprintf("===================================\n"); + HDprintf("PHDF5 TESTS START\n"); + HDprintf("===================================\n"); } /* Attempt to turn off atexit post processing so that in case errors @@ -331,225 +329,196 @@ int main(int argc, char **argv) * hang in the atexit post processing in which it may try to make MPI * calls. By then, MPI calls may not work. */ - if (H5dont_atexit() < 0){ - printf("Failed to turn off atexit processing. Continue.\n"); + if (H5dont_atexit() < 0) { + HDprintf("Failed to turn off atexit processing. Continue.\n"); }; H5open(); h5_show_hostname(); + HDmemset(filenames, 0, sizeof(filenames)); + for (int i = 0; i < NFILENAME; i++) { + if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) { + HDprintf("couldn't allocate filename array\n"); + MPI_Abort(MPI_COMM_WORLD, -1); + } + } + /* Initialize testing framework */ TestInit(argv[0], usage, parse_options); /* Tests are generally arranged from least to most complexity... 
*/ - AddTest("mpiodup", test_fapl_mpio_dup, NULL, - "fapl_mpio duplicate", NULL); - - AddTest("split", test_split_comm_access, NULL, - "dataset using split communicators", PARATESTFILE); - - AddTest("idsetw", dataset_writeInd, NULL, - "dataset independent write", PARATESTFILE); - AddTest("idsetr", dataset_readInd, NULL, - "dataset independent read", PARATESTFILE); - - AddTest("cdsetw", dataset_writeAll, NULL, - "dataset collective write", PARATESTFILE); - AddTest("cdsetr", dataset_readAll, NULL, - "dataset collective read", PARATESTFILE); - - AddTest("eidsetw", extend_writeInd, NULL, - "extendible dataset independent write", PARATESTFILE); - AddTest("eidsetr", extend_readInd, NULL, - "extendible dataset independent read", PARATESTFILE); - AddTest("ecdsetw", extend_writeAll, NULL, - "extendible dataset collective write", PARATESTFILE); - AddTest("ecdsetr", extend_readAll, NULL, - "extendible dataset collective read", PARATESTFILE); - AddTest("eidsetw2", extend_writeInd2, NULL, - "extendible dataset independent write #2", PARATESTFILE); - AddTest("selnone", none_selection_chunk, NULL, - "chunked dataset with none-selection", PARATESTFILE); - AddTest("calloc", test_chunk_alloc, NULL, - "parallel extend Chunked allocation on serial file", PARATESTFILE); - AddTest("fltread", test_filter_read, NULL, - "parallel read of dataset written serially with filters", PARATESTFILE); + AddTest("mpiodup", test_fapl_mpio_dup, NULL, "fapl_mpio duplicate", NULL); + + AddTest("split", test_split_comm_access, NULL, "dataset using split communicators", PARATESTFILE); + AddTest("h5oflusherror", test_oflush, NULL, "H5Oflush failure", PARATESTFILE); + +#ifdef PB_OUT /* temporary: disable page buffering when parallel */ + AddTest("page_buffer", test_page_buffer_access, NULL, "page buffer usage in parallel", PARATESTFILE); +#endif + + AddTest("props", test_file_properties, NULL, "Coll Metadata file property settings", PARATESTFILE); + + AddTest("delete", test_delete, NULL, "MPI-IO VFD file delete", PARATESTFILE); + + AddTest("idsetw", dataset_writeInd, NULL, "dataset independent write", PARATESTFILE); + AddTest("idsetr", dataset_readInd, NULL, "dataset independent read", PARATESTFILE); + + AddTest("cdsetw", dataset_writeAll, NULL, "dataset collective write", PARATESTFILE); + AddTest("cdsetr", dataset_readAll, NULL, "dataset collective read", PARATESTFILE); + + AddTest("eidsetw", extend_writeInd, NULL, "extendible dataset independent write", PARATESTFILE); + AddTest("eidsetr", extend_readInd, NULL, "extendible dataset independent read", PARATESTFILE); + AddTest("ecdsetw", extend_writeAll, NULL, "extendible dataset collective write", PARATESTFILE); + AddTest("ecdsetr", extend_readAll, NULL, "extendible dataset collective read", PARATESTFILE); + AddTest("eidsetw2", extend_writeInd2, NULL, "extendible dataset independent write #2", PARATESTFILE); + AddTest("selnone", none_selection_chunk, NULL, "chunked dataset with none-selection", PARATESTFILE); + AddTest("calloc", test_chunk_alloc, NULL, "parallel extend Chunked allocation on serial file", + PARATESTFILE); + AddTest("fltread", test_filter_read, NULL, "parallel read of dataset written serially with filters", + PARATESTFILE); #ifdef H5_HAVE_FILTER_DEFLATE - AddTest("cmpdsetr", compress_readAll, NULL, - "compressed dataset collective read", PARATESTFILE); + AddTest("cmpdsetr", compress_readAll, NULL, "compressed dataset collective read", PARATESTFILE); #endif /* H5_HAVE_FILTER_DEFLATE */ - AddTest("zerodsetr", zero_dim_dset, NULL, - "zero dim dset", PARATESTFILE); + 
AddTest("zerodsetr", zero_dim_dset, NULL, "zero dim dset", PARATESTFILE); - ndsets_params.name = PARATESTFILE; + ndsets_params.name = PARATESTFILE; ndsets_params.count = ndatasets; - AddTest("ndsetw", multiple_dset_write, NULL, - "multiple datasets write", &ndsets_params); + AddTest("ndsetw", multiple_dset_write, NULL, "multiple datasets write", &ndsets_params); - ngroups_params.name = PARATESTFILE; + ngroups_params.name = PARATESTFILE; ngroups_params.count = ngroups; - AddTest("ngrpw", multiple_group_write, NULL, - "multiple groups write", &ngroups_params); - AddTest("ngrpr", multiple_group_read, NULL, - "multiple groups read", &ngroups_params); + AddTest("ngrpw", multiple_group_write, NULL, "multiple groups write", &ngroups_params); + AddTest("ngrpr", multiple_group_read, NULL, "multiple groups read", &ngroups_params); - AddTest("compact", compact_dataset, NULL, - "compact dataset test", PARATESTFILE); + AddTest("compact", compact_dataset, NULL, "compact dataset test", PARATESTFILE); - collngroups_params.name = PARATESTFILE; + collngroups_params.name = PARATESTFILE; collngroups_params.count = ngroups; - AddTest("cngrpw", collective_group_write, NULL, - "collective group and dataset write", &collngroups_params); - AddTest("ingrpr", independent_group_read, NULL, - "independent group and dataset read", &collngroups_params); + /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */ + AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL, + "collective grp/dset write - independent grp/dset read", &collngroups_params); #ifndef H5_HAVE_WIN32_API - AddTest("bigdset", big_dataset, NULL, - "big dataset test", PARATESTFILE); + AddTest("bigdset", big_dataset, NULL, "big dataset test", PARATESTFILE); #else - printf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); + HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n"); #endif - AddTest("fill", dataset_fillvalue, NULL, - "dataset fill value", PARATESTFILE); - - AddTest("cchunk1", - coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE); - AddTest("cchunk2", - coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE); - AddTest("cchunk3", - coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE); - AddTest("cchunk4", - coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE); - - if((mpi_size < 3)&& MAINPROCESS ) { - printf("Collective chunk IO optimization APIs "); - printf("needs at least 3 processes to participate\n"); - printf("Collective chunk IO API tests will be skipped \n"); + AddTest("fill", dataset_fillvalue, NULL, "dataset fill value", PARATESTFILE); + + AddTest("cchunk1", coll_chunk1, NULL, "simple collective chunk io", PARATESTFILE); + AddTest("cchunk2", coll_chunk2, NULL, "noncontiguous collective chunk io", PARATESTFILE); + AddTest("cchunk3", coll_chunk3, NULL, "multi-chunk collective chunk io", PARATESTFILE); + AddTest("cchunk4", coll_chunk4, NULL, "collective chunk io with partial non-selection ", PARATESTFILE); + + if ((mpi_size < 3) && MAINPROCESS) { + HDprintf("Collective chunk IO optimization APIs "); + HDprintf("needs at least 3 processes to participate\n"); + HDprintf("Collective chunk IO API tests will be skipped \n"); } - AddTest((mpi_size <3)? "-cchunk5":"cchunk5" , - coll_chunk5,NULL, - "linked chunk collective IO without optimization",PARATESTFILE); - AddTest((mpi_size < 3)? 
"-cchunk6" : "cchunk6", - coll_chunk6,NULL, - "multi-chunk collective IO with direct request",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7", - coll_chunk7,NULL, - "linked chunk collective IO with optimization",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8", - coll_chunk8,NULL, - "linked chunk collective IO transferring to multi-chunk",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9", - coll_chunk9,NULL, - "multiple chunk collective IO with optimization",PARATESTFILE); - AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10", - coll_chunk10,NULL, - "multiple chunk collective IO transferring to independent IO",PARATESTFILE); - - - -/* irregular collective IO tests*/ - AddTest("ccontw", - coll_irregular_cont_write,NULL, - "collective irregular contiguous write",PARATESTFILE); - AddTest("ccontr", - coll_irregular_cont_read,NULL, - "collective irregular contiguous read",PARATESTFILE); - AddTest("cschunkw", - coll_irregular_simple_chunk_write,NULL, - "collective irregular simple chunk write",PARATESTFILE); - AddTest("cschunkr", - coll_irregular_simple_chunk_read,NULL, - "collective irregular simple chunk read",PARATESTFILE); - AddTest("ccchunkw", - coll_irregular_complex_chunk_write,NULL, - "collective irregular complex chunk write",PARATESTFILE); - AddTest("ccchunkr", - coll_irregular_complex_chunk_read,NULL, - "collective irregular complex chunk read",PARATESTFILE); - - AddTest("null", null_dataset, NULL, - "null dataset test", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk5" : "cchunk5", coll_chunk5, NULL, + "linked chunk collective IO without optimization", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk6" : "cchunk6", coll_chunk6, NULL, + "multi-chunk collective IO with direct request", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk7" : "cchunk7", coll_chunk7, NULL, + "linked chunk collective IO with optimization", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk8" : "cchunk8", coll_chunk8, NULL, + "linked chunk collective IO transferring to multi-chunk", PARATESTFILE); + AddTest((mpi_size < 3) ? "-cchunk9" : "cchunk9", coll_chunk9, NULL, + "multiple chunk collective IO with optimization", PARATESTFILE); + AddTest((mpi_size < 3) ? 
"-cchunk10" : "cchunk10", coll_chunk10, NULL, + "multiple chunk collective IO transferring to independent IO", PARATESTFILE); + + /* irregular collective IO tests*/ + AddTest("ccontw", coll_irregular_cont_write, NULL, "collective irregular contiguous write", PARATESTFILE); + AddTest("ccontr", coll_irregular_cont_read, NULL, "collective irregular contiguous read", PARATESTFILE); + AddTest("cschunkw", coll_irregular_simple_chunk_write, NULL, "collective irregular simple chunk write", + PARATESTFILE); + AddTest("cschunkr", coll_irregular_simple_chunk_read, NULL, "collective irregular simple chunk read", + PARATESTFILE); + AddTest("ccchunkw", coll_irregular_complex_chunk_write, NULL, "collective irregular complex chunk write", + PARATESTFILE); + AddTest("ccchunkr", coll_irregular_complex_chunk_read, NULL, "collective irregular complex chunk read", + PARATESTFILE); + + AddTest("null", null_dataset, NULL, "null dataset test", PARATESTFILE); io_mode_confusion_params.name = PARATESTFILE; io_mode_confusion_params.count = 0; /* value not used */ - AddTest("I/Omodeconf", io_mode_confusion, NULL, - "I/O mode confusion test -- hangs quickly on failure", + AddTest("I/Omodeconf", io_mode_confusion, NULL, "I/O mode confusion test -- hangs quickly on failure", &io_mode_confusion_params); - if((mpi_size < 3) && MAINPROCESS) { - printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); - printf("rr_obj_hdr_flush_confusion test will be skipped \n"); + if ((mpi_size < 3) && MAINPROCESS) { + HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n"); + HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n"); } - if(mpi_size > 2) { - rr_obj_flush_confusion_params.name = PARATESTFILE; + if (mpi_size > 2) { + rr_obj_flush_confusion_params.name = PARATESTFILE; rr_obj_flush_confusion_params.count = 0; /* value not used */ AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL, - "round robin object header flush confusion test", - &rr_obj_flush_confusion_params); + "round robin object header flush confusion test", &rr_obj_flush_confusion_params); } - AddTest("tldsc", - lower_dim_size_comp_test, NULL, - "test lower dim size comp in span tree to mpi derived type", - PARATESTFILE); + AddTest("alnbg1", chunk_align_bug_1, NULL, "Chunk allocation with alignment bug.", PARATESTFILE); - AddTest("lccio", - link_chunk_collective_io_test, NULL, - "test mpi derived type management", - PARATESTFILE); + AddTest("tldsc", lower_dim_size_comp_test, NULL, + "test lower dim size comp in span tree to mpi derived type", PARATESTFILE); - AddTest("actualio", actual_io_mode_tests, NULL, - "test actual io mode proprerty", - PARATESTFILE); + AddTest("lccio", link_chunk_collective_io_test, NULL, "test mpi derived type management", PARATESTFILE); + + AddTest("actualio", actual_io_mode_tests, NULL, "test actual io mode proprerty", PARATESTFILE); - AddTest("nocolcause", no_collective_cause_tests, NULL, - "test cause for broken collective io", + AddTest("nocolcause", no_collective_cause_tests, NULL, "test cause for broken collective io", PARATESTFILE); - AddTest("edpl", test_plist_ed, NULL, - "encode/decode Property Lists", NULL); + AddTest("edpl", test_plist_ed, NULL, "encode/decode Property Lists", NULL); + + AddTest("extlink", external_links, NULL, "test external links", NULL); - if((mpi_size < 2) && MAINPROCESS) { - printf("File Image Ops daisy chain test needs at least 2 processes.\n"); - printf("File Image Ops daisy chain test will be skipped \n"); + if ((mpi_size < 2) && MAINPROCESS) { + 
HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n"); + HDprintf("File Image Ops daisy chain test will be skipped \n"); } - AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, + AddTest((mpi_size < 2) ? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL, "file image ops daisy chain", NULL); - if((mpi_size < 2)&& MAINPROCESS ) { - printf("Atomicity tests need at least 2 processes to participate\n"); - printf("8 is more recommended.. Atomicity tests will be skipped \n"); + if ((mpi_size < 2) && MAINPROCESS) { + HDprintf("Atomicity tests need at least 2 processes to participate\n"); + HDprintf("8 is more recommended.. Atomicity tests will be skipped \n"); } else if (facc_type != FACC_MPIO && MAINPROCESS) { - printf("Atomicity tests will not work with a non MPIO VFD\n"); + HDprintf("Atomicity tests will not work with a non MPIO VFD\n"); } - else if(mpi_size >= 2 && facc_type == FACC_MPIO){ - AddTest("atomicity", dataset_atomicity, NULL, - "dataset atomic updates", PARATESTFILE); + else if (mpi_size >= 2 && facc_type == FACC_MPIO) { + AddTest("atomicity", dataset_atomicity, NULL, "dataset atomic updates", PARATESTFILE); } - AddTest("denseattr", test_dense_attr, NULL, - "Store Dense Attributes", PARATESTFILE); + AddTest("denseattr", test_dense_attr, NULL, "Store Dense Attributes", PARATESTFILE); + AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL, + "Collective Metadata read with some ranks having no selection", PARATESTFILE); + AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL, + "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE); + AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL, + "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE); /* Display testing information */ TestInfo(argv[0]); /* setup file access property list */ - fapl = H5Pcreate (H5P_FILE_ACCESS); + fapl = H5Pcreate(H5P_FILE_ACCESS); H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL); /* Parse command line arguments */ TestParseCmdLine(argc, argv); - if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){ - printf("===================================\n" - " Using Independent I/O with file set view to replace collective I/O \n" - "===================================\n"); + if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) { + HDprintf("===================================\n" + " Using Independent I/O with file set view to replace collective I/O \n" + "===================================\n"); } - /* Perform requested testing */ PerformTests(); @@ -563,7 +532,7 @@ int main(int argc, char **argv) TestSummary(); /* Clean up test files */ - h5_cleanup(FILENAME, fapl); + h5_clean_files(FILENAME, fapl); nerrors += GetTestNumErrs(); @@ -571,24 +540,32 @@ int main(int argc, char **argv) { int temp; MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); - nerrors=temp; + nerrors = temp; } - if (MAINPROCESS){ /* only process 0 reports */ - printf("===================================\n"); - if (nerrors) - printf("***PHDF5 tests detected %d errors***\n", nerrors); - else - printf("PHDF5 tests finished with no errors\n"); - printf("===================================\n"); + if (MAINPROCESS) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrors) + HDprintf("***PHDF5 tests detected %d errors***\n", nerrors); + else + HDprintf("PHDF5 tests finished with no errors\n"); + 
HDprintf("===================================\n"); } + + for (int i = 0; i < NFILENAME; i++) { + HDfree(filenames[i]); + filenames[i] = NULL; + } + /* close HDF5 library */ H5close(); + /* Release test infrastructure */ + TestShutdown(); + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ MPI_Finalize(); /* cannot just return (nerrors) because exit code is limited to 1byte */ - return(nerrors!=0); + return (nerrors != 0); } - diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h index 3597b17..14b8297 100644 --- a/testpar/testphdf5.h +++ b/testpar/testphdf5.h @@ -1,16 +1,13 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the files COPYING and Copyright.html. COPYING can be found at the root * - * of the source code distribution tree; Copyright.html can be found at the * - * root level of an installed copy of the electronic HDF5 document set and * - * is linked from the top-level documents page. It can also be found at * - * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * - * access to either file, you may request a copy from help@hdfgroup.org. * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* common definitions used by all parallel hdf5 test programs. */ @@ -20,177 +17,174 @@ #include "testpar.h" -enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD, - API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE, - API_MULTI_COLL,API_MULTI_IND}; +enum H5TEST_COLL_CHUNK_API { + API_NONE = 0, + API_LINK_HARD, + API_MULTI_HARD, + API_LINK_TRUE, + API_LINK_FALSE, + API_MULTI_COLL, + API_MULTI_IND +}; #ifndef FALSE -#define FALSE 0 +#define FALSE 0 #endif #ifndef TRUE -#define TRUE 1 +#define TRUE 1 #endif - /* Constants definitions */ -#define DIM0 600 /* Default dataset sizes. */ -#define DIM1 1200 /* Values are from a monitor pixel sizes */ -#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ -#define COL_FACTOR 16 /* Nominal column factor for dataset size */ -#define RANK 2 -#define DATASETNAME1 "Data1" -#define DATASETNAME2 "Data2" -#define DATASETNAME3 "Data3" -#define DATASETNAME4 "Data4" -#define DATASETNAME5 "Data5" -#define DATASETNAME6 "Data6" -#define DATASETNAME7 "Data7" -#define DATASETNAME8 "Data8" -#define DATASETNAME9 "Data9" +#define DIM0 600 /* Default dataset sizes. 
*/ +#define DIM1 1200 /* Values are from a monitor pixel sizes */ +#define ROW_FACTOR 8 /* Nominal row factor for dataset size */ +#define COL_FACTOR 16 /* Nominal column factor for dataset size */ +#define RANK 2 +#define DATASETNAME1 "Data1" +#define DATASETNAME2 "Data2" +#define DATASETNAME3 "Data3" +#define DATASETNAME4 "Data4" +#define DATASETNAME5 "Data5" +#define DATASETNAME6 "Data6" +#define DATASETNAME7 "Data7" +#define DATASETNAME8 "Data8" +#define DATASETNAME9 "Data9" /* point selection order */ -#define IN_ORDER 1 +#define IN_ORDER 1 #define OUT_OF_ORDER 2 /* Hyperslab layout styles */ -#define BYROW 1 /* divide into slabs of rows */ -#define BYCOL 2 /* divide into blocks of columns */ -#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ -#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ +#define BYROW 1 /* divide into slabs of rows */ +#define BYCOL 2 /* divide into blocks of columns */ +#define ZROW 3 /* same as BYCOL except process 0 gets 0 rows */ +#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */ /* File_Access_type bits */ -#define FACC_DEFAULT 0x0 /* default */ -#define FACC_MPIO 0x1 /* MPIO */ -#define FACC_SPLIT 0x2 /* Split File */ +#define FACC_DEFAULT 0x0 /* default */ +#define FACC_MPIO 0x1 /* MPIO */ +#define FACC_SPLIT 0x2 /* Split File */ -#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ +#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/ #define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */ /*Constants for collective chunk definitions */ -#define SPACE_DIM1 24 -#define SPACE_DIM2 4 -#define BYROW_CONT 1 -#define BYROW_DISCONT 2 -#define BYROW_SELECTNONE 3 +#define SPACE_DIM1 24 +#define SPACE_DIM2 4 +#define BYROW_CONT 1 +#define BYROW_DISCONT 2 +#define BYROW_SELECTNONE 3 #define BYROW_SELECTUNBALANCE 4 -#define BYROW_SELECTINCHUNK 5 - -#define DIMO_NUM_CHUNK 4 -#define DIM1_NUM_CHUNK 2 -#define LINK_TRUE_NUM_CHUNK 2 -#define LINK_FALSE_NUM_CHUNK 6 -#define MULTI_TRUE_PERCENT 50 -#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true" +#define BYROW_SELECTINCHUNK 5 + +#define DIMO_NUM_CHUNK 4 +#define DIM1_NUM_CHUNK 2 +#define LINK_TRUE_NUM_CHUNK 2 +#define LINK_FALSE_NUM_CHUNK 6 +#define MULTI_TRUE_PERCENT 50 +#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true" #define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false" -#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard" +#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard" #define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard" #define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll" #define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp" #define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name" - /*Constants for MPI derived data type generated from span tree */ -#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */ -#define MSPACE1_DIM 27000 /* Dataset size in memory */ -#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */ -#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */ -#define FSPACE_DIM2 3600 /* We will read dataset back from the file to the dataset in memory with these dataspace parameters. 
-
 /* Constants for MPI derived data type generated from span tree */
-#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
-#define MSPACE1_DIM 27000 /* Dataset size in memory */
-#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
-#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
-#define FSPACE_DIM2 3600 /* We will read dataset back from the file to the dataset in memory with these dataspace parameters. */
-#define MSPACE_RANK 2
-#define MSPACE_DIM1 9
-#define MSPACE_DIM2 3600
-#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
-#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
-#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
-#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
-
-#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
-#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
-#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
-#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
-#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
-#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
-#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-
-
-
-#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
-#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
-#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
-#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
-
-#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
-#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
-#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
-#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
-#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
-#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
-#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
-#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
-
-
-#define NPOINTS 4 /* Number of points that will be selected
-                     and overwritten */
+#define MSPACE1_RANK 1     /* Rank of the first dataset in memory */
+#define MSPACE1_DIM  27000 /* Dataset size in memory */
+#define FSPACE_RANK  2     /* Dataset rank as it is stored in the file */
+#define FSPACE_DIM1  9     /* Dimension sizes of the dataset as it is stored in the file */
+#define FSPACE_DIM2  3600
+/* We will read the dataset back from the file into the dataset in memory with these dataspace parameters. */
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 9
+#define MSPACE_DIM2 3600
+#define FHCOUNT0    1   /* Count of the first dimension of the first hyperslab selection */
+#define FHCOUNT1    768 /* Count of the second dimension of the first hyperslab selection */
+#define FHSTRIDE0   4   /* Stride of the first dimension of the first hyperslab selection */
+#define FHSTRIDE1   3   /* Stride of the second dimension of the first hyperslab selection */
+#define FHBLOCK0    3   /* Block of the first dimension of the first hyperslab selection */
+#define FHBLOCK1    2   /* Block of the second dimension of the first hyperslab selection */
+#define FHSTART0    0   /* Start of the first dimension of the first hyperslab selection */
+#define FHSTART1    1   /* Start of the second dimension of the first hyperslab selection */
+
+#define SHCOUNT0  1   /* Count of the first dimension of the second hyperslab selection */
+#define SHCOUNT1  1   /* Count of the second dimension of the second hyperslab selection */
+#define SHSTRIDE0 1   /* Stride of the first dimension of the second hyperslab selection */
+#define SHSTRIDE1 1   /* Stride of the second dimension of the second hyperslab selection */
+#define SHBLOCK0  3   /* Block of the first dimension of the second hyperslab selection */
+#define SHBLOCK1  768 /* Block of the second dimension of the second hyperslab selection */
+#define SHSTART0  4   /* Start of the first dimension of the second hyperslab selection */
+#define SHSTART1  0   /* Start of the second dimension of the second hyperslab selection */
+
+#define MHCOUNT0  6912 /* Count of the first dimension of the hyperslab selection in memory */
+#define MHSTRIDE0 1    /* Stride of the first dimension of the hyperslab selection in memory */
+#define MHBLOCK0  1    /* Block of the first dimension of the hyperslab selection in memory */
+#define MHSTART0  1    /* Start of the first dimension of the hyperslab selection in memory */
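The FH* and SH* groups describe two hyperslabs on the 9 x 3600 file dataspace, and MH* describes the matching run in the 27000-element memory dataspace: the first slab covers 3 rows by 768 x 2 = 1536 columns (4608 elements), the second covers 3 rows by 768 columns (2304 elements), and 4608 + 2304 = 6912 = MHCOUNT0. A sketch of how such constants feed H5Sselect_hyperslab follows; the SET-then-OR combination is inferred from that arithmetic rather than quoted from the tests, and error checking is omitted.

/* Sketch: two file hyperslabs OR-ed together, plus the 1-D memory run. */
hsize_t fdims[2] = {FSPACE_DIM1, FSPACE_DIM2};
hsize_t mdim[1]  = {MSPACE1_DIM};
hid_t   fspace   = H5Screate_simple(FSPACE_RANK, fdims, NULL);
hid_t   mspace   = H5Screate_simple(MSPACE1_RANK, mdim, NULL);

hsize_t start[2]  = {FHSTART0, FHSTART1};
hsize_t stride[2] = {FHSTRIDE0, FHSTRIDE1};
hsize_t count[2]  = {FHCOUNT0, FHCOUNT1};
hsize_t block[2]  = {FHBLOCK0, FHBLOCK1};

/* First hyperslab: 3 rows x 1536 columns = 4608 elements */
H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, stride, count, block);

/* Second hyperslab, OR-ed in: 3 rows x 768 columns = 2304 elements */
start[0]  = SHSTART0;  start[1]  = SHSTART1;
stride[0] = SHSTRIDE0; stride[1] = SHSTRIDE1;
count[0]  = SHCOUNT0;  count[1]  = SHCOUNT1;
block[0]  = SHBLOCK0;  block[1]  = SHBLOCK1;
H5Sselect_hyperslab(fspace, H5S_SELECT_OR, start, stride, count, block);

/* Memory: one contiguous run of MHCOUNT0 = 6912 elements */
hsize_t mstart[1]  = {MHSTART0};
hsize_t mstride[1] = {MHSTRIDE0};
hsize_t mcount[1]  = {MHCOUNT0};
hsize_t mblock[1]  = {MHBLOCK0};
H5Sselect_hyperslab(mspace, H5S_SELECT_SET, mstart, mstride, mcount, mblock);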
+
+#define RFFHCOUNT0  3   /* Count of the first dimension of the first file hyperslab of the read test */
+#define RFFHCOUNT1  768 /* Count of the second dimension of the first file hyperslab of the read test */
+#define RFFHSTRIDE0 1   /* Stride of the first dimension of the first file hyperslab of the read test */
+#define RFFHSTRIDE1 1   /* Stride of the second dimension of the first file hyperslab of the read test */
+#define RFFHBLOCK0  1   /* Block of the first dimension of the first file hyperslab of the read test */
+#define RFFHBLOCK1  1   /* Block of the second dimension of the first file hyperslab of the read test */
+#define RFFHSTART0  1   /* Start of the first dimension of the first file hyperslab of the read test */
+#define RFFHSTART1  2   /* Start of the second dimension of the first file hyperslab of the read test */
+
+#define RFSHCOUNT0  3    /* Count of the first dimension of the second file hyperslab of the read test */
+#define RFSHCOUNT1  1536 /* Count of the second dimension of the second file hyperslab of the read test */
+#define RFSHSTRIDE0 1    /* Stride of the first dimension of the second file hyperslab of the read test */
+#define RFSHSTRIDE1 1    /* Stride of the second dimension of the second file hyperslab of the read test */
+#define RFSHBLOCK0  1    /* Block of the first dimension of the second file hyperslab of the read test */
+#define RFSHBLOCK1  1    /* Block of the second dimension of the second file hyperslab of the read test */
+#define RFSHSTART0  2    /* Start of the first dimension of the second file hyperslab of the read test */
+#define RFSHSTART1  4    /* Start of the second dimension of the second file hyperslab of the read test */
+
+#define RMFHCOUNT0  3   /* Count of the first dimension of the first memory hyperslab of the read test */
+#define RMFHCOUNT1  768 /* Count of the second dimension of the first memory hyperslab of the read test */
+#define RMFHSTRIDE0 1   /* Stride of the first dimension of the first memory hyperslab of the read test */
+#define RMFHSTRIDE1 1   /* Stride of the second dimension of the first memory hyperslab of the read test */
+#define RMFHBLOCK0  1   /* Block of the first dimension of the first memory hyperslab of the read test */
+#define RMFHBLOCK1  1   /* Block of the second dimension of the first memory hyperslab of the read test */
+#define RMFHSTART0  0   /* Start of the first dimension of the first memory hyperslab of the read test */
+#define RMFHSTART1  0   /* Start of the second dimension of the first memory hyperslab of the read test */
+
+#define RMSHCOUNT0  3    /* Count of the first dimension of the second memory hyperslab of the read test */
+#define RMSHCOUNT1  1536 /* Count of the second dimension of the second memory hyperslab of the read test */
+#define RMSHSTRIDE0 1    /* Stride of the first dimension of the second memory hyperslab of the read test */
+#define RMSHSTRIDE1 1    /* Stride of the second dimension of the second memory hyperslab of the read test */
+#define RMSHBLOCK0  1    /* Block of the first dimension of the second memory hyperslab of the read test */
+#define RMSHBLOCK1  1    /* Block of the second dimension of the second memory hyperslab of the read test */
+#define RMSHSTART0  1    /* Start of the first dimension of the second memory hyperslab of the read test */
+#define RMSHSTART1  2    /* Start of the second dimension of the second memory hyperslab of the read test */
+
+#define NPOINTS \
+    4 /* Number of points that will be selected \
+         and overwritten */
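NPOINTS pairs with the point_set() helper declared near the end of this header, whose order argument takes the IN_ORDER/OUT_OF_ORDER values defined earlier. A hedged sketch of the underlying selection call; the dataspace extent and coordinates are invented for illustration.

/* Sketch: selecting NPOINTS individual elements on a rank-2 dataspace. */
hsize_t dims[RANK]            = {ROW_FACTOR, COL_FACTOR}; /* illustrative extent */
hid_t   space                 = H5Screate_simple(RANK, dims, NULL);
hsize_t coords[NPOINTS][RANK] = {{0, 0}, {1, 1}, {2, 2}, {3, 3}};

H5Sselect_elements(space, H5S_SELECT_SET, (size_t)NPOINTS, (const hsize_t *)coords);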
 
 /* Definitions of the selection mode for the test_actual_io_function. */
-#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
-#define TEST_ACTUAL_IO_RESET 1
-#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
-#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
-#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
-#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
-#define TEST_ACTUAL_IO_LINK_CHUNK 8
-#define TEST_ACTUAL_IO_CONTIGUOUS 9
+#define TEST_ACTUAL_IO_NO_COLLECTIVE            0
+#define TEST_ACTUAL_IO_RESET                    1
+#define TEST_ACTUAL_IO_MULTI_CHUNK_IND          2
+#define TEST_ACTUAL_IO_MULTI_CHUNK_COL          3
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX          4
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND   6
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL   7
+#define TEST_ACTUAL_IO_LINK_CHUNK               8
+#define TEST_ACTUAL_IO_CONTIGUOUS               9
 
 /* Definitions of the selection mode for the no_collective_cause_tests function. */
 #define TEST_COLLECTIVE 0x001
-#define TEST_SET_INDEPENDENT 0x002
+#define TEST_SET_INDEPENDENT 0x002
 #define TEST_DATATYPE_CONVERSION 0x004
 #define TEST_DATA_TRANSFORMS 0x008
 #define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
 #define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
 #define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
-#define TEST_FILTERS 0x080
-/* TEST_FILTERS will take place of this after supporting mpio + filter for
- * H5Dcreate and H5Dwrite */
-#define TEST_FILTERS_READ 0x100
 
 /* Don't erase these lines, they are put here for debugging purposes */
 /*
@@ -207,12 +201,11 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
 #define NPOINTS 4
 */
 /* end of debugging macro */
-
 /* type definitions */
-typedef struct H5Ptest_param_t /* holds extra test parameters */
+typedef struct H5Ptest_param_t /* holds extra test parameters */
 {
-    char *name;
-    int count;
+    char *name;
+    int   count;
 } H5Ptest_param_t;
 
 /* Dataset data type. Ints can easily be dumped in octal. */
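The TEST_ACTUAL_IO_* values track the I/O modes that HDF5 reports through H5D_mpio_actual_io_mode_t, and the TEST_* cause bits parallel the reasons returned by H5Pget_mpio_no_collective_cause(). A sketch of the verification pattern, assuming `dxpl` is the transfer property list just used in a collective H5Dwrite; nerrors is the shared error counter declared below.

/* Sketch: ask the library what kind of MPI-IO actually happened, and why
 * collective I/O was broken if it was. */
H5D_mpio_actual_io_mode_t actual_io_mode;
uint32_t                  local_cause  = 0;
uint32_t                  global_cause = 0;

H5Pget_mpio_actual_io_mode(dxpl, &actual_io_mode); /* e.g. H5D_MPIO_CHUNK_COLLECTIVE */
H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);

if (global_cause != H5D_MPIO_COLLECTIVE) /* any nonzero bits name the culprit */
    nerrors++;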
@@ -220,31 +213,34 @@ typedef int DATATYPE;
 
 /* Shape Same Tests Definitions */
 typedef enum {
-    IND_CONTIG, /* Independent IO on contigous datasets */
-    COL_CONTIG, /* Collective IO on contigous datasets */
-    IND_CHUNKED, /* Independent IO on chunked datasets */
-    COL_CHUNKED /* Collective IO on chunked datasets */
+    IND_CONTIG,  /* Independent IO on contiguous datasets */
+    COL_CONTIG,  /* Collective IO on contiguous datasets */
+    IND_CHUNKED, /* Independent IO on chunked datasets */
+    COL_CHUNKED  /* Collective IO on chunked datasets */
 } ShapeSameTestMethods;
 
 /* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void *old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
+extern int dim0, dim1;           /* Dataset dimensions */
+extern int chunkdim0, chunkdim1; /* Chunk dimensions */
+extern int nerrors;              /* Errors count */
+extern int facc_type;            /* Test file access type */
 extern int dxfer_coll_type;
 
 /* Test program prototypes */
 void test_plist_ed(void);
+void external_links(void);
 void zero_dim_dset(void);
+void test_file_properties(void);
+void test_delete(void);
 void multiple_dset_write(void);
 void multiple_group_write(void);
 void multiple_group_read(void);
+void collective_group_write_independent_group_read(void);
 void collective_group_write(void);
 void independent_group_read(void);
 void test_fapl_mpio_dup(void);
 void test_split_comm_access(void);
+void test_page_buffer_access(void);
 void dataset_atomicity(void);
 void dataset_writeInd(void);
 void dataset_writeAll(void);
@@ -284,6 +280,7 @@ void io_mode_confusion(void);
 void rr_obj_hdr_flush_confusion(void);
 void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm);
 void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm);
+void chunk_align_bug_1(void);
 void lower_dim_size_comp_test(void);
 void link_chunk_collective_io_test(void);
 void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
@@ -293,12 +290,16 @@ void file_image_daisy_chain_test(void);
 void compress_readAll(void);
 #endif /* H5_HAVE_FILTER_DEFLATE */
 void test_dense_attr(void);
+void test_partial_no_selection_coll_md_read(void);
+void test_multi_chunk_io_addrmap_issue(void);
+void test_link_chunk_io_sort_chunk_issue(void);
+void test_oflush(void);
 
 /* commonly used prototypes */
-hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
+hid_t      create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
 MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
-int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[],
-                 hsize_t block[], DATATYPE *dataset, DATATYPE *original);
-void point_set (hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
-                size_t num_points, hsize_t coords[], int order);
+int  dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+                  DATATYPE *original);
+void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+               hsize_t coords[], int order);
 
 #endif /* PHDF5TEST_H */
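create_faccess_plist() is the helper the tests share for turning a FACC_* value into a file-access property list. A hedged usage sketch follows; the file name is invented, error checking is omitted, and the helper is assumed (not shown here) to translate FACC_MPIO/FACC_SPLIT into the matching H5Pset_fapl_mpio()/split-driver settings.

/* Sketch: open a shared test file with an MPI-IO access plist. */
hid_t acc_tpl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
hid_t fid     = H5Fcreate("ParaEg.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);

H5Pclose(acc_tpl);
/* ... collective dataset creation and I/O ... */
H5Fclose(fid);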
