summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorScot Breitenfeld <brtnfld@hdfgroup.org>2022-04-08 14:25:14 (GMT)
committerGitHub <noreply@github.com>2022-04-08 14:25:14 (GMT)
commit6fad870737605a39103dc8f26b9799e158a3ee16 (patch)
tree1dc7a66327199cd9fa80c714243f33a83f13d1f5
parent304d33f88b657c3a93da311c47a62cfca5af12c3 (diff)
downloadhdf5-feature/parallel_h5repack.zip
hdf5-feature/parallel_h5repack.tar.gz
hdf5-feature/parallel_h5repack.tar.bz2
Sync branch with develop (#1616)feature/parallel_h5repack
Sync branch with develop
-rw-r--r--.gitattributes6
-rw-r--r--.github/CODEOWNERS30
-rw-r--r--.github/FUNDING.yml3
-rw-r--r--.github/workflows/codespell.yml2
-rw-r--r--.github/workflows/main.yml4
-rw-r--r--.github/workflows/pr-check.yml1
-rw-r--r--CMakeFilters.cmake10
-rw-r--r--CMakeInstallation.cmake18
-rw-r--r--CMakeLists.txt22
-rw-r--r--MANIFEST3886
-rw-r--r--README.md (renamed from README.txt)61
-rw-r--r--bin/README2
-rwxr-xr-xbin/bbrelease242
-rwxr-xr-xbin/checkposix11
-rwxr-xr-xbin/chkmanifest154
-rwxr-xr-xbin/h5vers12
-rwxr-xr-xbin/locate_sw238
-rwxr-xr-xbin/release84
-rwxr-xr-xbin/restore.sh3
-rwxr-xr-xbin/runtest966
-rwxr-xr-xbin/snapshot837
-rw-r--r--bin/snapshot_version19
-rwxr-xr-xbin/timekeeper129
-rw-r--r--c++/src/H5AbstractDs.cpp2
-rw-r--r--c++/src/H5Attribute.cpp1
-rw-r--r--c++/src/H5DataSet.cpp1
-rw-r--r--c++/src/H5DataType.cpp1
-rw-r--r--c++/src/H5FaccProp.cpp4
-rw-r--r--c++/src/H5Library.cpp2
-rw-r--r--c++/src/H5Location.cpp2
-rw-r--r--c++/src/H5Location.h2
-rw-r--r--c++/src/H5PropList.cpp1
-rw-r--r--c++/src/cpp_doc_config2
-rw-r--r--c++/test/dsets.cpp1
-rw-r--r--c++/test/tattr.cpp8
-rw-r--r--c++/test/th5s.cpp2
-rw-r--r--c++/test/titerate.cpp9
-rw-r--r--c++/test/tvlstr.cpp10
-rw-r--r--config/apple67
-rw-r--r--config/clang-warnings/developer-general1
-rw-r--r--config/clang-warnings/general16
-rw-r--r--config/cmake/CTestCustom.cmake2
-rw-r--r--config/cmake/H5pubconf.h.in3
-rw-r--r--config/cmake/HDF5PluginMacros.cmake4
-rw-r--r--config/cmake/HDF5_Examples.cmake.in3
-rw-r--r--config/cmake/HDFFortranCompilerFlags.cmake6
-rw-r--r--config/cmake/README.md.cmake.in (renamed from config/cmake/README.txt.cmake.in)2
-rw-r--r--config/cmake/cacheinit.cmake22
-rw-r--r--config/cmake/hdf5-config.cmake.in6
-rw-r--r--config/cmake/libh5cc.in23
-rw-r--r--config/cmake/scripts/HDF5config.cmake4
-rw-r--r--config/cmake_ext_mod/ConfigureChecks.cmake96
-rw-r--r--config/cmake_ext_mod/HDFLibMacros.cmake20
-rw-r--r--config/cmake_ext_mod/HDFMacros.cmake10
-rw-r--r--config/cmake_ext_mod/HDFTests.c4
-rw-r--r--config/cmake_ext_mod/runTest.cmake16
-rw-r--r--config/gnu-warnings/cxx-general1
-rw-r--r--config/gnu-warnings/developer-general3
-rw-r--r--config/gnu-warnings/general14
-rw-r--r--config/intel-warnings/ifort-general2
-rw-r--r--config/intel-warnings/win-ifort-general1
-rw-r--r--config/sanitizer/code-coverage.cmake15
-rw-r--r--config/sanitizer/sanitizers.cmake11
-rw-r--r--configure.ac118
-rw-r--r--doc/img/release-schedule.plantuml45
-rwxr-xr-xdoc/img/release-schedule.pngbin0 -> 16991 bytes
-rw-r--r--doxygen/aliases2
-rw-r--r--doxygen/dox/About.dox2
-rw-r--r--doxygen/dox/Glossary.dox565
-rw-r--r--doxygen/dox/Overview.dox9
-rw-r--r--doxygen/examples/H5.format.1.0.html16
-rw-r--r--doxygen/examples/H5.format.1.1.html16
-rw-r--r--doxygen/hdf5doxy_layout.xml2
-rw-r--r--doxygen/img/HDF5.pngbin0 -> 10660 bytes
-rw-r--r--examples/CMakeLists.txt42
-rw-r--r--examples/CMakeTests.cmake40
-rw-r--r--examples/Makefile.am6
-rw-r--r--examples/h5_extlink.c4
-rw-r--r--examples/ph5_filtered_writes.c490
-rw-r--r--examples/ph5_filtered_writes_no_sel.c370
-rw-r--r--examples/ph5example.c11
-rw-r--r--fortran/src/H5Df.c8
-rw-r--r--fortran/src/H5Dff.F9012
-rw-r--r--fortran/src/H5Ff.c4
-rw-r--r--fortran/test/tH5A_1_8.F906
-rw-r--r--fortran/test/tH5F.F9038
-rw-r--r--fortran/test/tH5P.F904
-rw-r--r--hl/c++/test/ptableTest.cpp2
-rw-r--r--hl/test/test_file_image.c5
-rw-r--r--hl/test/test_image.c6
-rw-r--r--hl/test/test_packet.c2
-rw-r--r--hl/test/test_packet_vlen.c16
-rw-r--r--hl/tools/gif2h5/gif2mem.c32
-rw-r--r--hl/tools/gif2h5/hdfgifwr.c14
-rw-r--r--hl/tools/gif2h5/writehdf.c2
-rw-r--r--hl/tools/h5watch/extend_dset.c2
-rw-r--r--hl/tools/h5watch/h5watch.c12
-rw-r--r--java/examples/datasets/H5Ex_D_Sofloat.java13
-rw-r--r--java/examples/datasets/JavaDatasetExample.sh.in6
-rw-r--r--java/examples/datasets/Makefile.am2
-rw-r--r--java/examples/datatypes/H5Ex_T_ObjectReference.java275
-rw-r--r--java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java335
-rw-r--r--java/examples/datatypes/JavaDatatypeExample.sh.in6
-rw-r--r--java/examples/datatypes/Makefile.am2
-rw-r--r--java/examples/groups/JavaGroupExample.sh.in6
-rw-r--r--java/examples/groups/Makefile.am2
-rw-r--r--java/examples/intro/JavaIntroExample.sh.in6
-rw-r--r--java/examples/intro/Makefile.am2
-rw-r--r--java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt8
-rw-r--r--java/lib/ext/slf4j-nop-1.7.25.jarbin4007 -> 0 bytes
-rw-r--r--java/lib/ext/slf4j-nop-1.7.33.jarbin0 -> 4020 bytes
-rw-r--r--java/lib/ext/slf4j-simple-1.7.25.jarbin15257 -> 0 bytes
-rw-r--r--java/lib/ext/slf4j-simple-1.7.33.jarbin0 -> 15400 bytes
-rw-r--r--java/lib/slf4j-api-1.7.25.jarbin41203 -> 0 bytes
-rw-r--r--java/lib/slf4j-api-1.7.33.jarbin0 -> 41473 bytes
-rw-r--r--java/src/Makefile.am2
-rw-r--r--java/src/hdf/hdf5lib/H5.java4
-rw-r--r--java/src/hdf/hdf5lib/package-info.java2
-rw-r--r--java/src/jni/h5util.c151
-rw-r--r--java/test/CMakeLists.txt1
-rw-r--r--java/test/Makefile.am3
-rw-r--r--java/test/TestH5.java4
-rw-r--r--java/test/TestH5Ocopy.java300
-rw-r--r--java/test/TestH5OcopyOld.java397
-rw-r--r--java/test/junit.sh.in28
-rw-r--r--java/test/testfiles/JUnit-TestH5Ocopy.txt3
-rw-r--r--java/test/testfiles/JUnit-TestH5OcopyOld.txt10
-rw-r--r--release_docs/HISTORY-1_0-1_8_0_rc3.txt2
-rw-r--r--release_docs/HISTORY-1_13.txt2168
-rw-r--r--release_docs/HISTORY-1_8_0-1_10_0.txt2
-rw-r--r--release_docs/RELEASE.txt1460
-rw-r--r--release_docs/USING_CMake_Examples.txt7
-rw-r--r--release_docs/USING_HDF5_CMake.txt19
-rw-r--r--src/CMakeLists.txt95
-rw-r--r--src/H5.c32
-rw-r--r--src/H5AC.c86
-rw-r--r--src/H5ACmpio.c107
-rw-r--r--src/H5ACpublic.h7
-rw-r--r--src/H5B2internal.c2
-rw-r--r--src/H5B2leaf.c2
-rw-r--r--src/H5C.c260
-rw-r--r--src/H5CSprivate.h4
-rw-r--r--src/H5CX.c10
-rw-r--r--src/H5CXprivate.h5
-rw-r--r--src/H5Cepoch.c31
-rw-r--r--src/H5Cimage.c3
-rw-r--r--src/H5Cpkg.h226
-rw-r--r--src/H5Cprivate.h10
-rw-r--r--src/H5Cpublic.h7
-rw-r--r--src/H5Dchunk.c1176
-rw-r--r--src/H5Dcompact.c6
-rw-r--r--src/H5Dcontig.c123
-rw-r--r--src/H5Dearray.c4
-rw-r--r--src/H5Defl.c8
-rw-r--r--src/H5Dfarray.c4
-rw-r--r--src/H5Dint.c55
-rw-r--r--src/H5Dio.c151
-rw-r--r--src/H5Dlayout.c2
-rw-r--r--src/H5Dmpio.c5368
-rw-r--r--src/H5Dpkg.h24
-rw-r--r--src/H5Dpublic.h15
-rw-r--r--src/H5Dselect.c187
-rw-r--r--src/H5EAdbg.c6
-rw-r--r--src/H5EAprivate.h5
-rw-r--r--src/H5EAtest.c2
-rw-r--r--src/H5ES.c55
-rw-r--r--src/H5ESdevelop.h2
-rw-r--r--src/H5ESint.c108
-rw-r--r--src/H5ESlist.c11
-rw-r--r--src/H5ESpkg.h5
-rw-r--r--src/H5ESpublic.h13
-rw-r--r--src/H5FAdblkpage.c2
-rw-r--r--src/H5FAprivate.h9
-rw-r--r--src/H5FAtest.c2
-rw-r--r--src/H5FD.c373
-rw-r--r--src/H5FDcore.c5
-rw-r--r--src/H5FDdevelop.h29
-rw-r--r--src/H5FDdirect.c10
-rw-r--r--src/H5FDfamily.c13
-rw-r--r--src/H5FDhdfs.c5
-rw-r--r--src/H5FDint.c1977
-rw-r--r--src/H5FDlog.c75
-rw-r--r--src/H5FDmirror.c11
-rw-r--r--src/H5FDmirror_priv.h4
-rw-r--r--src/H5FDmpio.c1292
-rw-r--r--src/H5FDmpio.h6
-rw-r--r--src/H5FDmulti.c11
-rw-r--r--src/H5FDperform.c21
-rw-r--r--src/H5FDprivate.h24
-rw-r--r--src/H5FDros3.c11
-rw-r--r--src/H5FDsec2.c5
-rw-r--r--src/H5FDspace.c15
-rw-r--r--src/H5FDsplitter.c13
-rw-r--r--src/H5FDstdio.c75
-rw-r--r--src/H5FLprivate.h18
-rw-r--r--src/H5FS.c38
-rw-r--r--src/H5FSsection.c61
-rw-r--r--src/H5Fio.c91
-rw-r--r--src/H5Fmpi.c196
-rw-r--r--src/H5Fprivate.h12
-rw-r--r--src/H5Fpublic.h4
-rw-r--r--src/H5Gprivate.h2
-rw-r--r--src/H5HFcache.c2
-rw-r--r--src/H5HP.c904
-rw-r--r--src/H5HPprivate.h68
-rw-r--r--src/H5Lpublic.h6
-rw-r--r--src/H5MF.c64
-rw-r--r--src/H5MFaggr.c39
-rw-r--r--src/H5MFsection.c32
-rw-r--r--src/H5MP.c443
-rw-r--r--src/H5MPmodule.h32
-rw-r--r--src/H5MPpkg.h99
-rw-r--r--src/H5MPprivate.h57
-rw-r--r--src/H5MPtest.c213
-rw-r--r--src/H5Ocache.c8
-rw-r--r--src/H5Ocopy.c3
-rw-r--r--src/H5Ocopy_ref.c82
-rw-r--r--src/H5Odtype.c38
-rw-r--r--src/H5Oint.c12
-rw-r--r--src/H5Opkg.h2
-rw-r--r--src/H5Opublic.h6
-rw-r--r--src/H5PB.c69
-rw-r--r--src/H5PBprivate.h6
-rw-r--r--src/H5PLpath.c4
-rw-r--r--src/H5Pfapl.c34
-rw-r--r--src/H5Pmodule.h3
-rw-r--r--src/H5Ppublic.h281
-rw-r--r--src/H5RS.c2
-rw-r--r--src/H5Shyper.c57
-rw-r--r--src/H5Smpio.c6
-rw-r--r--src/H5Spoint.c2
-rw-r--r--src/H5Spublic.h16
-rw-r--r--src/H5TS.c76
-rw-r--r--src/H5TSprivate.h6
-rw-r--r--src/H5Tcommit.c2
-rw-r--r--src/H5Tconv.c2
-rw-r--r--src/H5Tnative.c6
-rw-r--r--src/H5Tprivate.h20
-rw-r--r--src/H5VLcallback.c2
-rw-r--r--src/H5VLnative.h6
-rw-r--r--src/H5VLnative_token.c4
-rw-r--r--src/H5VLpassthru.c13
-rw-r--r--src/H5VMprivate.h4
-rw-r--r--src/H5Z.c14
-rw-r--r--src/H5Znbit.c29
-rw-r--r--src/H5Zscaleoffset.c8
-rw-r--r--src/H5module.h6
-rw-r--r--src/H5mpi.c233
-rw-r--r--src/H5private.h119
-rw-r--r--src/H5public.h11
-rw-r--r--src/H5system.c6
-rw-r--r--src/H5timer.c15
-rw-r--r--src/H5trace.c4
-rw-r--r--src/Makefile.am3
-rw-r--r--test/AtomicWriterReader.txt2
-rw-r--r--test/CMakeLists.txt2
-rw-r--r--test/CMakeTests.cmake53
-rw-r--r--test/Makefile.am71
-rw-r--r--test/ShellTests.cmake58
-rw-r--r--test/app_ref.c4
-rw-r--r--test/cache_api.c2
-rw-r--r--test/cache_common.c148
-rw-r--r--test/cache_image.c16
-rw-r--r--test/chunk_info.c2
-rw-r--r--test/cmpd_dset.c12
-rw-r--r--test/del_many_dense_attrs.c4
-rw-r--r--test/dsets.c6
-rw-r--r--test/dt_arith.c2
-rw-r--r--test/dtransform.c17
-rw-r--r--test/dtypes.c8
-rw-r--r--test/earray.c9
-rw-r--r--test/enc_dec_plist.c4
-rw-r--r--test/event_set.c513
-rw-r--r--test/external.c11
-rw-r--r--test/external_common.c8
-rw-r--r--test/fheap.c146
-rw-r--r--test/filter_plugin.c10
-rw-r--r--test/flush1.c2
-rw-r--r--test/flush2.c2
-rw-r--r--test/flushrefresh.c4
-rw-r--r--test/genall5.c90
-rw-r--r--test/h5test.c75
-rw-r--r--test/links.c9
-rw-r--r--test/null_vfd_plugin.c75
-rw-r--r--test/page_buffer.c18
-rw-r--r--test/pool.c794
-rw-r--r--test/reserved.c6
-rw-r--r--test/swmr.c40
-rw-r--r--test/swmr_addrem_writer.c2
-rw-r--r--test/swmr_common.c5
-rw-r--r--test/swmr_generator.c2
-rw-r--r--test/swmr_reader.c4
-rw-r--r--test/swmr_remove_reader.c2
-rw-r--r--test/swmr_remove_writer.c2
-rw-r--r--test/swmr_sparse_reader.c4
-rw-r--r--test/swmr_sparse_writer.c2
-rw-r--r--test/swmr_start_write.c2
-rw-r--r--test/swmr_writer.c2
-rw-r--r--test/tattr.c153
-rw-r--r--test/test_abort_fail.sh.in (renamed from test/testabort_fail.sh.in)0
-rw-r--r--test/test_check_version.sh.in (renamed from test/testcheck_version.sh.in)0
-rw-r--r--test/test_error.sh.in (renamed from test/testerror.sh.in)0
-rw-r--r--test/test_external_env.sh.in (renamed from test/testexternal_env.sh.in)0
-rw-r--r--test/test_filter_plugin.sh.in113
-rw-r--r--test/test_flush_refresh.sh.in (renamed from test/testflushrefresh.sh.in)0
-rw-r--r--test/test_libinfo.sh.in (renamed from test/testlibinfo.sh.in)0
-rw-r--r--test/test_links_env.sh.in (renamed from test/testlinks_env.sh.in)0
-rw-r--r--test/test_plugin.sh.in140
-rw-r--r--test/test_swmr.pwsh.in (renamed from test/testswmr.pwsh.in)0
-rw-r--r--test/test_swmr.sh.in (renamed from test/testswmr.sh.in)0
-rw-r--r--test/test_use_cases.sh.in (renamed from test/test_usecases.sh.in)0
-rw-r--r--test/test_vds_env.sh.in (renamed from test/testvds_env.sh.in)0
-rw-r--r--test/test_vds_swmr.pwsh.in (renamed from test/testvdsswmr.pwsh.in)0
-rw-r--r--test/test_vds_swmr.sh.in (renamed from test/testvdsswmr.sh.in)0
-rw-r--r--test/test_vol_plugin.sh.in84
-rw-r--r--test/testhdf5.c1
-rw-r--r--test/testhdf5.h1
-rw-r--r--test/theap.c1081
-rw-r--r--test/tmisc.c6
-rw-r--r--test/trefstr.c10
-rw-r--r--test/tselect.c36
-rw-r--r--test/tvlstr.c10
-rw-r--r--test/twriteorder.c6
-rw-r--r--test/unregister.c2
-rw-r--r--test/use_append_chunk.c6
-rw-r--r--test/use_append_mchunks.c6
-rw-r--r--test/vds_env.c5
-rw-r--r--test/vfd.c2035
-rw-r--r--testpar/CMakeLists.txt1
-rw-r--r--testpar/Makefile.am2
-rw-r--r--testpar/t_2Gio.c277
-rw-r--r--testpar/t_bigio.c37
-rw-r--r--testpar/t_cache.c46
-rw-r--r--testpar/t_cache_image.c14
-rw-r--r--testpar/t_coll_chunk.c5
-rw-r--r--testpar/t_coll_md_read.c93
-rw-r--r--testpar/t_dset.c329
-rw-r--r--testpar/t_file.c16
-rw-r--r--testpar/t_filters_parallel.c4584
-rw-r--r--testpar/t_filters_parallel.h117
-rw-r--r--testpar/t_pflush1.c8
-rw-r--r--testpar/t_pflush2.c8
-rw-r--r--testpar/t_prop.c2
-rw-r--r--testpar/t_vfd.c4055
-rw-r--r--testpar/testphdf5.h4
-rw-r--r--tools/lib/h5diff.c4
-rw-r--r--tools/lib/h5diff_array.c100
-rw-r--r--tools/lib/h5diff_attr.c2
-rw-r--r--tools/lib/h5diff_util.c4
-rw-r--r--tools/lib/h5tools.c4
-rw-r--r--tools/lib/h5tools.h3
-rw-r--r--tools/lib/h5tools_dump.c59
-rw-r--r--tools/lib/h5tools_str.c33
-rw-r--r--tools/lib/h5tools_str.h2
-rw-r--r--tools/lib/h5tools_utils.c4
-rw-r--r--tools/lib/h5tools_utils.h6
-rw-r--r--tools/lib/h5trav.c7
-rw-r--r--tools/src/h5copy/h5copy.c21
-rw-r--r--tools/src/h5diff/h5diff_common.c18
-rw-r--r--tools/src/h5diff/h5diff_common.h2
-rw-r--r--tools/src/h5diff/h5diff_main.c4
-rw-r--r--tools/src/h5diff/ph5diff_main.c2
-rw-r--r--tools/src/h5dump/h5dump.c16
-rw-r--r--tools/src/h5dump/h5dump_ddl.c8
-rw-r--r--tools/src/h5dump/h5dump_xml.c50
-rw-r--r--tools/src/h5format_convert/h5format_convert.c8
-rw-r--r--tools/src/h5import/h5import.c2
-rw-r--r--tools/src/h5jam/h5jam.c8
-rw-r--r--tools/src/h5jam/h5unjam.c6
-rw-r--r--tools/src/h5ls/h5ls.c62
-rw-r--r--tools/src/h5perf/pio_engine.c8
-rw-r--r--tools/src/h5perf/pio_perf.c73
-rw-r--r--tools/src/h5perf/sio_engine.c4
-rw-r--r--tools/src/h5perf/sio_perf.c53
-rw-r--r--tools/src/h5repack/PARALLEL_REPACK_readme.txt2
-rw-r--r--tools/src/h5repack/create_h5file.c4
-rw-r--r--tools/src/h5repack/h5prepack_main.c6
-rw-r--r--tools/src/h5repack/h5repack_copy.c4
-rw-r--r--tools/src/h5repack/h5repack_main.c29
-rw-r--r--tools/src/h5stat/h5stat.c21
-rw-r--r--tools/src/misc/h5clear.c6
-rw-r--r--tools/src/misc/h5delete.c2
-rw-r--r--tools/src/misc/h5mkgrp.c6
-rw-r--r--tools/test/h5copy/h5copygentest.c2
-rw-r--r--tools/test/h5diff/testfiles/h5diff_10.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_600.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_603.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_606.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_612.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_615.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_621.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_622.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_623.txt5
-rw-r--r--tools/test/h5diff/testfiles/h5diff_624.txt5
-rw-r--r--tools/test/h5dump/binread.c2
-rw-r--r--tools/test/h5dump/h5dumpgentest.c6
-rw-r--r--tools/test/h5jam/getub.c8
-rw-r--r--tools/test/h5jam/tellub.c6
-rw-r--r--tools/test/h5repack/h5repackgentest.c2
-rw-r--r--tools/test/h5repack/testfiles/h5repack-help.txt5
-rw-r--r--tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl14
-rw-r--r--tools/test/h5stat/testfiles/h5stat_help1.ddl4
-rw-r--r--tools/test/h5stat/testfiles/h5stat_help2.ddl4
-rw-r--r--tools/test/h5stat/testfiles/h5stat_nofile.ddl4
-rw-r--r--tools/test/misc/h5clear_gentest.c4
-rw-r--r--tools/test/perform/direct_write_perf.c2
-rw-r--r--tools/test/perform/perf_meta.c14
-rw-r--r--tools/test/perform/pio_standalone.c2
-rw-r--r--tools/test/perform/pio_standalone.h3
-rw-r--r--tools/test/perform/sio_standalone.c2
-rw-r--r--tools/test/perform/sio_standalone.h3
-rw-r--r--tools/test/perform/zip_perf.c10
-rw-r--r--tools/testfiles/h5dump-help.txt6
-rw-r--r--tools/testfiles/pbits/tnofilename-with-packed-bits.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsIncomplete.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsLengthExceeded.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsLengthPositive.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsMaxExceeded.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsOffsetExceeded.ddl6
-rw-r--r--tools/testfiles/pbits/tpbitsOffsetNegative.ddl6
-rw-r--r--utils/mirror_vfd/mirror_remote.c2
-rw-r--r--utils/mirror_vfd/mirror_remote.h2
-rw-r--r--utils/mirror_vfd/mirror_server.c27
-rw-r--r--utils/mirror_vfd/mirror_writer.c8
-rw-r--r--utils/tools/h5dwalk/h5dwalk.c28
425 files changed, 27813 insertions, 18720 deletions
diff --git a/.gitattributes b/.gitattributes
index 385f805..2ff0dab 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -192,12 +192,12 @@ java/examples/testfiles/examples.intro.H5_CreateGroup.txt -text
java/examples/testfiles/examples.intro.H5_CreateGroupAbsoluteRelative.txt -text
java/examples/testfiles/examples.intro.H5_CreateGroupDataset.txt -text
java/examples/testfiles/examples.intro.H5_ReadWrite.txt -text
-java/lib/ext/slf4j-nop-1.7.25.jar -text svneol=unset#application/zip
-java/lib/ext/slf4j-simple-1.7.25.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-nop-1.7.33.jar -text svneol=unset#application/zip
+java/lib/ext/slf4j-simple-1.7.33.jar -text svneol=unset#application/zip
java/lib/hamcrest-core.jar -text svneol=unset#application/java-archive
java/lib/junit.jar -text svneol=unset#application/java-archive
java/lib/simplelogger.properties -text
-java/lib/slf4j-api-1.7.25.jar -text svneol=unset#application/zip
+java/lib/slf4j-api-1.7.33.jar -text svneol=unset#application/zip
java/src/CMakeLists.txt -text
java/src/Makefile.am -text
java/src/hdf/CMakeLists.txt -text
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 29e5866..99daf78 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -7,36 +7,36 @@
# Order is important. The last matching pattern has the most precedence.
# So if a pull request only touches javascript files, only these owners
# will be requested to review.
-*.cmake @byrnHDF @derobins
-CMakeLists.txt @byrnHDF @derobins
-CMakeTests.* @byrnHDF @derobins
+*.cmake @byrnHDF @ChristopherHogan @gnuoyd @derobins
+CMakeLists.txt @byrnHDF @ChristopherHogan @gnuoyd @derobins
+CMakeTests.* @byrnHDF @ChristopherHogan @gnuoyd @derobins
-/bin/ @lrknox @derobins @qkoziol
+/bin/ @lrknox @ChristopherHogan @gnuoyd @derobins @qkoziol
-/c++/ @bmribler @byrnHDF @derobins
+/c++/ @bmribler @byrnHDF @ChristopherHogan @gnuoyd @derobins
-/config/ @lrknox @derobins @qkoziol @byrnHDF
+/config/ @lrknox @byrnHDF @ChristopherHogan @gnuoyd @derobins @qkoziol
-/doc/ @gnuoyd @jrmainzer
+/doc/ @ChristopherHogan @gnuoyd @jrmainzer
-/examples/ @lrknox @derobins @bmribler
+/examples/ @lrknox @bmribler @ChristopherHogan @gnuoyd @derobins
/fortran/ @brtnfld @epourmal
-/hl/ @bmribler @byrnHDF @derobins
+/hl/ @bmribler @byrnHDF @ChristopherHogan @gnuoyd @derobins
/java/ @jhendersonHDF @byrnHDF
-/m4/ @lrknox @derobins
+/m4/ @lrknox @ChristopherHogan @gnuoyd @derobins
/release_docs/ @lrknox @bmribler @byrnHDF
-/src/ @jhendersonHDF @derobins @fortnern @qkoziol @soumagne @vchoi-hdfgroup @jrmainzer
+/src/ @jhendersonHDF @fortnern @soumagne @vchoi-hdfgroup @ChristopherHogan @gnuoyd @derobins @jrmainzer @qkoziol
-/test/ @jhendersonHDF @derobins @fortnern @qkoziol @soumagne @vchoi-hdfgroup @jrmainzer
+/test/ @jhendersonHDF @fortnern @soumagne @vchoi-hdfgroup @ChristopherHogan @gnuoyd @derobins @jrmainzer @qkoziol
-/testpar/ @jhendersonHDF @rawarren @jrmainzer @qkoziol
+/testpar/ @jhendersonHDF @ChristopherHogan @gnuoyd @jrmainzer @qkoziol
-/tools/ @byrnHDF @bmribler @derobins
+/tools/ @byrnHDF @bmribler @ChristopherHogan @gnuoyd @derobins
-/utils/ @lrknox @byrnHDF @derobins
+/utils/ @lrknox @byrnHDF @ChristopherHogan @gnuoyd @derobins
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..7c4daaf
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+custom: "https://hdfgroup.org/about-us/donate-to-the-hdf-group/"
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 163353e..78c1a4f 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -12,4 +12,4 @@ jobs:
- uses: codespell-project/actions-codespell@master
with:
skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c
- ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum
+ ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ro,oce
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 7b1d0c4..2282a09 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -9,6 +9,9 @@ on:
- '.github/**'
- 'doc/**'
- 'release_docs/**'
+ - 'ACKNOWLEDGEMENTS'
+ - 'COPYING**'
+ - '**.md'
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
@@ -184,7 +187,6 @@ jobs:
if: matrix.generator == 'autogen'
run: |
sh ./autogen.sh
- sh ./bin/chkmanifest
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
$GITHUB_WORKSPACE/configure --enable-shared --${{ matrix.ts }}-threadsafe --${{ matrix.hl }}-hl --${{ matrix.parallel }}-parallel --${{ matrix.cpp }}-cxx --${{ matrix.fortran }}-fortran --${{ matrix.java }}-java
diff --git a/.github/workflows/pr-check.yml b/.github/workflows/pr-check.yml
index 79d5c83..65bf42b 100644
--- a/.github/workflows/pr-check.yml
+++ b/.github/workflows/pr-check.yml
@@ -179,7 +179,6 @@ jobs:
if: matrix.generator == 'autogen'
run: |
sh ./autogen.sh
- sh ./bin/chkmanifest
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
$GITHUB_WORKSPACE/configure --enable-shared --${{ matrix.ts }}-threadsafe --${{ matrix.hl }}-hl --${{ matrix.parallel }}-parallel --${{ matrix.cpp }}-cxx --${{ matrix.fortran }}-fortran --${{ matrix.java }}-java
diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake
index 51ac61c..725390b 100644
--- a/CMakeFilters.cmake
+++ b/CMakeFilters.cmake
@@ -19,7 +19,7 @@ include (FetchContent)
set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)")
set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ)
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
- option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 1)
+ option (ZLIB_USE_EXTERNAL "Use External Library Building for HDF5_ZLIB" 1)
option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 1)
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT")
set (ZLIB_URL ${ZLIB_GIT_URL} CACHE STRING "Path to zlib git repository")
@@ -82,7 +82,7 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT)
set (H5_HAVE_ZLIB_H 1)
set (H5_HAVE_LIBZ 1)
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSE "Filter ZLIB is built")
+ message (VERBOSE "Filter HDF5_ZLIB is built")
endif ()
else ()
message (FATAL_ERROR " ZLib is Required for ZLib support in HDF5")
@@ -100,7 +100,7 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT)
set (LINK_COMP_LIBS ${LINK_COMP_LIBS} ${ZLIB_STATIC_LIBRARY})
INCLUDE_DIRECTORIES (${ZLIB_INCLUDE_DIRS})
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSE "Filter ZLIB is ON")
+ message (VERBOSE "Filter HDF5_ZLIB is ON")
endif ()
endif ()
@@ -149,9 +149,9 @@ if (HDF5_ENABLE_SZIP_SUPPORT)
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
message (VERBOSE "... with library AEC")
endif ()
- set (SZ_PACKAGE_NAME ${LIBAEC_PACKAGE_NAME})
+ set (SZIP_PACKAGE_NAME ${LIBAEC_PACKAGE_NAME})
else ()
- set (SZ_PACKAGE_NAME ${SZIP_PACKAGE_NAME})
+ set (SZIP_PACKAGE_NAME ${SZIP_PACKAGE_NAME})
endif ()
else ()
message (FATAL_ERROR "SZIP is Required for SZIP support in HDF5")
diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake
index b506f05..b5efebf 100644
--- a/CMakeInstallation.cmake
+++ b/CMakeInstallation.cmake
@@ -182,7 +182,7 @@ if (HDF5_PACK_EXAMPLES)
endif ()
#-----------------------------------------------------------------------------
-# Configure the README.txt file for the binary package
+# Configure the README.md file for the binary package
#-----------------------------------------------------------------------------
HDF_README_PROPERTIES(HDF5_BUILD_FORTRAN)
@@ -395,13 +395,19 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}")
set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON)
+ list (APPEND CPACK_GENERATOR "DEB")
set (CPACK_DEBIAN_PACKAGE_SECTION "Libraries")
set (CPACK_DEBIAN_PACKAGE_MAINTAINER "${HDF5_PACKAGE_BUGREPORT}")
-# list (APPEND CPACK_GENERATOR "RPM")
+ list (APPEND CPACK_GENERATOR "RPM")
set (CPACK_RPM_PACKAGE_RELEASE "1")
+ set (CPACK_RPM_PACKAGE_RELEASE_DIST ON)
set (CPACK_RPM_COMPONENT_INSTALL ON)
set (CPACK_RPM_PACKAGE_RELOCATABLE ON)
+ set (CPACK_RPM_FILE_NAME "RPM-DEFAULT")
+ set (CPACK_RPM_PACKAGE_NAME "${CPACK_PACKAGE_NAME}")
+ set (CPACK_RPM_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION}")
+ set (CPACK_RPM_PACKAGE_VENDOR "${CPACK_PACKAGE_VENDOR}")
set (CPACK_RPM_PACKAGE_LICENSE "BSD-style")
set (CPACK_RPM_PACKAGE_GROUP "Development/Libraries")
set (CPACK_RPM_PACKAGE_URL "${HDF5_PACKAGE_URL}")
@@ -442,11 +448,11 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
if (ZLIB_FOUND AND ZLIB_USE_EXTERNAL)
if (WIN32)
- set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;ALL;/")
+ set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};HDF5_ZLIB;ALL;/")
else ()
- set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;libraries;/")
- set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;headers;/")
- set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;configinstall;/")
+ set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};HDF5_ZLIB;libraries;/")
+ set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};HDF5_ZLIB;headers;/")
+ set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};HDF5_ZLIB;configinstall;/")
endif ()
endif ()
if (SZIP_FOUND AND SZIP_USE_EXTERNAL)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index dcee4f9..cad378b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -229,9 +229,9 @@ set (HDF5_JAVA_JNI_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/jni)
set (HDF5_JAVA_HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/hdf)
set (HDF5_JAVA_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/java/test)
set (HDF5_JAVA_LIB_DIR ${HDF5_SOURCE_DIR}/java/lib)
-set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-1.7.25.jar)
-set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-1.7.25.jar)
-set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-1.7.25.jar)
+set (HDF5_JAVA_LOGGING_JAR ${HDF5_SOURCE_DIR}/java/lib/slf4j-api-1.7.33.jar)
+set (HDF5_JAVA_LOGGING_NOP_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-nop-1.7.33.jar)
+set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-1.7.33.jar)
set (HDF5_DOXYGEN_DIR ${HDF5_SOURCE_DIR}/doxygen)
#-----------------------------------------------------------------------------
@@ -708,10 +708,14 @@ if (HDF5_ENABLE_PARALLEL)
# Used by Parallel Compression feature
set (PARALLEL_FILTERED_WRITES ON)
- CHECK_SYMBOL_EXISTS (MPI_Mprobe "mpi.h" H5_HAVE_MPI_Mprobe)
- CHECK_SYMBOL_EXISTS (MPI_Imrecv "mpi.h" H5_HAVE_MPI_Imrecv)
- if (NOT H5_HAVE_MPI_Mprobe OR NOT H5_HAVE_MPI_Imrecv)
- message (WARNING "The MPI_Mprobe and/or MPI_Imrecv functions could not be located.
+ CHECK_SYMBOL_EXISTS (MPI_Ibarrier "mpi.h" H5_HAVE_MPI_Ibarrier)
+ CHECK_SYMBOL_EXISTS (MPI_Issend "mpi.h" H5_HAVE_MPI_Issend)
+ CHECK_SYMBOL_EXISTS (MPI_Iprobe "mpi.h" H5_HAVE_MPI_Iprobe)
+ CHECK_SYMBOL_EXISTS (MPI_Irecv "mpi.h" H5_HAVE_MPI_Irecv)
+ if (H5_HAVE_MPI_Ibarrier AND H5_HAVE_MPI_Issend AND H5_HAVE_MPI_Iprobe AND H5_HAVE_MPI_Irecv)
+ set (H5_HAVE_PARALLEL_FILTERED_WRITES 1)
+ else ()
+ message (WARNING "The MPI_Ibarrier/MPI_Issend/MPI_Iprobe/MPI_Irecv functions could not be located.
Parallel writes of filtered data will be disabled.")
set (PARALLEL_FILTERED_WRITES OFF)
endif ()
@@ -895,10 +899,10 @@ add_subdirectory (src)
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
if (ZLIB_FOUND AND ZLIB_USE_EXTERNAL)
if (NOT ONLY_SHARED_LIBS)
- add_dependencies (${HDF5_LIB_TARGET} ZLIB)
+ add_dependencies (${HDF5_LIB_TARGET} HDF5_ZLIB)
endif ()
if (BUILD_SHARED_LIBS)
- add_dependencies (${HDF5_LIBSH_TARGET} ZLIB)
+ add_dependencies (${HDF5_LIBSH_TARGET} HDF5_ZLIB)
endif ()
endif ()
if (SZIP_FOUND AND SZIP_USE_EXTERNAL)
diff --git a/MANIFEST b/MANIFEST
deleted file mode 100644
index 93cfd98..0000000
--- a/MANIFEST
+++ /dev/null
@@ -1,3886 +0,0 @@
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-#------------------------------------------------------------------------------
-# This is the list of files that are part of HDF5 source distribution.
-# All files have a `./' prefix and appear in lexicographic order.
-# Lines that end with _DO_NOT_DISTRIBUTE_ will not be included in a
-# release. Blank lines and comments are ignored. Comments must start
-# in column one with a '#'.
-#------------------------------------------------------------------------------
-
-./.gitattributes _DO_NOT_DISTRIBUTE_
-./.gitignore _DO_NOT_DISTRIBUTE_
-./.autom4te.cfg _DO_NOT_DISTRIBUTE_
-./.h5chkright.ini _DO_NOT_DISTRIBUTE_
-./ACKNOWLEDGMENTS
-./COPYING
-./COPYING_LBNL_HDF5
-./MANIFEST
-./Makefile.dist
-./Makefile.am
-./README.txt
-./acsite.m4
-./autogen.sh
-./configure.ac
-
-./.clang-format
-./.github/CODEOWNERS _DO_NOT_DISTRIBUTE_
-./.github/workflows/clang-format-fix.yml _DO_NOT_DISTRIBUTE_
-./.github/workflows/clang-format-check.yml _DO_NOT_DISTRIBUTE_
-./.github/workflows/main.yml _DO_NOT_DISTRIBUTE_
-./.github/workflows/pr-check.yml _DO_NOT_DISTRIBUTE_
-./.github/workflows/codespell.yml _DO_NOT_DISTRIBUTE_
-
-./m4/aclocal_fc.m4
-./m4/aclocal_fc.f90
-./m4/ax_check_class.m4
-./m4/ax_check_classpath.m4
-./m4/ax_check_java_home.m4
-./m4/ax_check_junit.m4
-./m4/ax_check_rqrd_class.m4
-./m4/ax_java_check_class.m4
-./m4/ax_java_options.m4
-./m4/ax_jni_include_dir.m4
-./m4/ax_prog_doxygen.m4
-./m4/ax_prog_jar.m4
-./m4/ax_prog_java_cc.m4
-./m4/ax_prog_java_works.m4
-./m4/ax_prog_java.m4
-./m4/ax_prog_javac_works.m4
-./m4/ax_prog_javac.m4
-./m4/ax_prog_javadoc.m4
-./m4/ax_prog_javah.m4
-./m4/ax_try_compile_java.m4
-./m4/ax_try_run_java.m4
-
-./bin/bbrelease _DO_NOT_DISTRIBUTE_
-./bin/buildhdf5
-./bin/checkapi _DO_NOT_DISTRIBUTE_
-./bin/checkposix _DO_NOT_DISTRIBUTE_
-./bin/chkconfigure _DO_NOT_DISTRIBUTE_
-./bin/chkcopyright _DO_NOT_DISTRIBUTE_
-./bin/chkmanifest
-./bin/cmakehdf5
-./bin/debug-ohdr _DO_NOT_DISTRIBUTE_
-./bin/dependencies
-./bin/deploy
-./bin/distdep
-./bin/errors _DO_NOT_DISTRIBUTE_
-./bin/format_source
-./bin/format_source_patch
-./bin/genparser
-./bin/gcov_script _DO_NOT_DISTRIBUTE_
-./bin/h5cc.in
-./bin/h5redeploy.in
-./bin/h5vers
-./bin/iostats
-./bin/locate_sw
-./bin/make_err
-./bin/make_overflow
-./bin/make_vers
-./bin/Makefile.am
-./bin/makehelp
-./bin/mkdirs
-./bin/newer
-./bin/output_filter.sh
-./bin/README _DO_NOT_DISTRIBUTE_
-./bin/release
-./bin/restore.sh
-./bin/runtest _DO_NOT_DISTRIBUTE_
-./bin/runbkgprog _DO_NOT_DISTRIBUTE_
-./bin/snapshot
-./bin/snapshot_version _DO_NOT_DISTRIBUTE_
-./bin/switch_maint_mode _DO_NOT_DISTRIBUTE_
-./bin/timekeeper _DO_NOT_DISTRIBUTE_
-./bin/trace
-./bin/warnhist _DO_NOT_DISTRIBUTE_
-./bin/yodconfigure
-./bin/batch/ctestP.lsf.in.cmake
-./bin/batch/ctestP.sl.in.cmake
-./bin/batch/ctestS.lsf.in.cmake
-./bin/batch/ctestS.sl.in.cmake
-./bin/batch/knl_ctestP.sl.in.cmake
-./bin/batch/knl_ctestS.sl.in.cmake
-./bin/batch/knl_H5detect.sl.in.cmake
-./bin/batch/ctest.qsub.in.cmake
-./bin/batch/ray_ctestP.lsf.in.cmake
-./bin/batch/ray_ctestS.lsf.in.cmake
-./bin/batch/raybsub
-./bin/pkgscrpts/h5rmflags _DO_NOT_DISTRIBUTE_
-./bin/pkgscrpts/makeHDF5BinaryTarfiles.pl _DO_NOT_DISTRIBUTE_
-./bin/pkgscrpts/makeInternalREADME.pl _DO_NOT_DISTRIBUTE_
-./bin/pkgscrpts/makeOuterREADME.pl _DO_NOT_DISTRIBUTE_
-
-./config/BlankForm
-./config/apple
-./config/cce-fflags
-./config/cce-flags
-./config/commence.am
-./config/conclude.am
-./config/conclude_fc.am
-./config/examples.am
-./config/freebsd
-./config/gnu-cxxflags
-./config/gnu-fflags
-./config/gnu-flags
-./config/cygwin
-./config/ibm-aix
-./config/ibm-flags
-./config/intel-cxxflags
-./config/intel-fflags
-./config/intel-flags
-./config/libhdf5.pc.in
-./config/linux-gnu
-./config/linux-gnuaout
-./config/linux-gnueabihf
-./config/linux-gnulibc1
-./config/linux-gnulibc2
-./config/lt_vers.am
-./config/Makefile.am.blank
-./config/netbsd
-./config/pgi-cxxflags
-./config/pgi-fflags
-./config/pgi-flags
-./config/solaris
-
-#warnings files for both autotools and CMake
-./config/clang-cxxflags
-./config/clang-flags
-./config/clang-warnings/developer-general
-./config/clang-warnings/error-general
-./config/clang-warnings/general
-./config/clang-warnings/no-developer-general
-./config/gnu-warnings/4.8-4.last
-./config/gnu-warnings/4.8
-./config/gnu-warnings/4.9
-./config/gnu-warnings/5
-./config/gnu-warnings/6
-./config/gnu-warnings/7
-./config/gnu-warnings/8
-./config/gnu-warnings/9
-./config/gnu-warnings/9.3
-./config/gnu-warnings/cxx-general
-./config/gnu-warnings/cxx-4.8
-./config/gnu-warnings/cxx-4.9
-./config/gnu-warnings/cxx-5
-./config/gnu-warnings/cxx-9
-./config/gnu-warnings/cxx-developer-4.8
-./config/gnu-warnings/cxx-error-5
-./config/gnu-warnings/cxx-error-general
-./config/gnu-warnings/developer-4.8
-./config/gnu-warnings/developer-7
-./config/gnu-warnings/developer-8
-./config/gnu-warnings/developer-10
-./config/gnu-warnings/developer-general
-./config/gnu-warnings/error-5
-./config/gnu-warnings/error-8
-./config/gnu-warnings/error-general
-./config/gnu-warnings/general
-./config/gnu-warnings/gfort-general
-./config/gnu-warnings/gfort-4.8
-./config/gnu-warnings/developer-gfort-5
-./config/gnu-warnings/gfort-6
-./config/gnu-warnings/gfort-8
-./config/gnu-warnings/no-cxx-developer-4.8
-./config/gnu-warnings/no-developer-4.8
-./config/gnu-warnings/no-developer-8
-./config/gnu-warnings/no-developer-general
-./config/intel-warnings/15
-./config/intel-warnings/18
-./config/intel-warnings/developer-general
-./config/intel-warnings/general
-./config/intel-warnings/win-developer-general
-./config/intel-warnings/win-general
-./config/intel-warnings/ifort-general
-
-./config/cmake/FindMFU.cmake
-./config/cmake/FindDTCMP.cmake
-./config/cmake/FindCIRCLE.cmake
-
-./config/site-specific/BlankForm
-
-./doc/branches-explained.md
-./doc/code-conventions.md
-./doc/contributing.md
-./doc/library-init-shutdown.md
-./doxygen/aliases
-./doxygen/CMakeLists.txt
-./doxygen/Doxyfile.in
-./doxygen/dox/APIVersions.dox
-./doxygen/dox/About.dox
-./doxygen/dox/Cookbook.dox
-./doxygen/dox/DDLBNF110.dox
-./doxygen/dox/DDLBNF112.dox
-./doxygen/dox/FTS.dox
-./doxygen/dox/GettingStarted.dox
-./doxygen/dox/H5AC_cache_config_t.dox
-./doxygen/dox/MetadataCachingInHDF5.dox
-./doxygen/dox/Overview.dox
-./doxygen/dox/ReferenceManual.dox
-./doxygen/dox/RFC.dox
-./doxygen/dox/Specifications.dox
-./doxygen/dox/TechnicalNotes.dox
-./doxygen/dox/api-compat-macros.dox
-./doxygen/dox/maybe_metadata_reads.dox
-./doxygen/dox/rm-template.dox
-./doxygen/dox/cookbook/Accessibility.c
-./doxygen/dox/cookbook/Accessibility.dox
-./doxygen/dox/cookbook/Attributes.c
-./doxygen/dox/cookbook/Attributes.dox
-./doxygen/dox/cookbook/Files.c
-./doxygen/dox/cookbook/Files.dox
-./doxygen/dox/cookbook/Performance.dox
-./doxygen/examples/DebuggingHDF5Applications.html
-./doxygen/examples/FF-IH_FileGroup.gif
-./doxygen/examples/FF-IH_FileObject.gif
-./doxygen/examples/FileFormat.html
-./doxygen/examples/FileFormatSpecChunkDiagram.jpg
-./doxygen/examples/Filters.html
-./doxygen/examples/H5Pset_metadata_read_attempts.c
-./doxygen/examples/H5Pset_object_flush_cb.c
-./doxygen/examples/H5.format.1.0.html
-./doxygen/examples/H5.format.1.1.html
-./doxygen/examples/H5.format.2.0.html
-./doxygen/examples/H5.format.html
-./doxygen/examples/H5A_examples.c
-./doxygen/examples/H5D_examples.c
-./doxygen/examples/H5E_examples.c
-./doxygen/examples/H5Fclose.c
-./doxygen/examples/H5Fcreate.c
-./doxygen/examples/H5F_examples.c
-./doxygen/examples/H5G_examples.c
-./doxygen/examples/H5I_examples.c
-./doxygen/examples/H5L_examples.c
-./doxygen/examples/H5O_examples.c
-./doxygen/examples/H5PL_examples.c
-./doxygen/examples/H5Pget_metadata_read_attempts.1.c
-./doxygen/examples/H5Pget_metadata_read_attempts.2.c
-./doxygen/examples/H5Pget_metadata_read_attempts.3.c
-./doxygen/examples/H5Pget_object_flush_cb.c
-./doxygen/examples/H5P_examples.c
-./doxygen/examples/H5R_examples.c
-./doxygen/examples/H5S_examples.c
-./doxygen/examples/H5T_examples.c
-./doxygen/examples/H5Z_examples.c
-./doxygen/examples/H5_examples.c
-./doxygen/examples/ImageSpec.html
-./doxygen/examples/IOFlow.html
-./doxygen/examples/PaletteExample1.gif
-./doxygen/examples/Palettes.fm.anc.gif
-./doxygen/examples/TableSpec.html
-./doxygen/examples/ThreadSafeLibrary.html
-./doxygen/examples/VFL.html
-./doxygen/examples/hello_hdf5.c
-./doxygen/hdf5_footer.html
-./doxygen/hdf5_header.html
-./doxygen/hdf5_navtree_hacks.js
-./doxygen/hdf5doxy.css
-./doxygen/hdf5doxy_layout.xml
-./doxygen/img/FF-IH_FileGroup.gif
-./doxygen/img/FF-IH_FileObject.gif
-./doxygen/img/FileFormatSpecChunkDiagram.jpg
-./doxygen/img/HDFG-logo.png
-./doxygen/img/IOFlow.gif
-./doxygen/img/IOFlow2.gif
-./doxygen/img/IOFlow3.gif
-./doxygen/img/PaletteExample1.gif
-./doxygen/img/Palettes.fm.anc.gif
-./doxygen/img/ftv2node.png
-./doxygen/img/ftv2pnode.png
-
-./examples/Attributes.txt
-./examples/Makefile.am
-./examples/h5_chunk_read.c
-./examples/h5_compound.c
-./examples/h5_crtgrpd.c
-./examples/h5_debug_trace.c
-./examples/h5_subset.c
-./examples/h5_cmprss.c
-./examples/h5_rdwt.c
-./examples/h5_crtgrpar.c
-./examples/h5_extend.c
-./examples/h5_crtatt.c
-./examples/h5_crtgrp.c
-./examples/h5_crtdat.c
-./examples/h5_drivers.c
-./examples/h5_dtransform.c
-./examples/h5_elink_unix2win.c
-./examples/h5_extend_write.c
-./examples/h5_extlink.c
-./examples/h5_group.c
-./examples/h5_interm_group.c
-./examples/h5_read.c
-./examples/h5_write.c
-./examples/h5_select.c
-./examples/h5_attribute.c
-./examples/h5_mount.c
-./examples/h5_ref_compat.c
-./examples/h5_ref_extern.c
-./examples/h5_reference_deprec.c
-./examples/h5_ref2reg_deprec.c
-./examples/h5_shared_mesg.c
-./examples/ph5example.c
-./examples/h5_vds.c
-./examples/h5_vds-exc.c
-./examples/h5_vds-exclim.c
-./examples/h5_vds-eiger.c
-./examples/h5_vds-simpleIO.c
-./examples/h5_vds-percival.c
-./examples/h5_vds-percival-unlim.c
-./examples/h5_vds-percival-unlim-maxmin.c
-./examples/testh5cc.sh.in
-./examples/README
-
-
-#------------------------------------------------------------------------------
-#
-# Begin Fortran interface
-#
-#------------------------------------------------------------------------------
-
-./fortran/Makefile.am
-./fortran/robodoc.rc
-
-./fortran/examples/Makefile.am
-./fortran/examples/compound.f90
-./fortran/examples/compound_fortran2003.f90
-./fortran/examples/compound_complex_fortran2003.f90
-./fortran/examples/h5_cmprss.f90
-./fortran/examples/h5_crtatt.f90
-./fortran/examples/h5_crtdat.f90
-./fortran/examples/h5_crtgrp.f90
-./fortran/examples/h5_crtgrpar.f90
-./fortran/examples/h5_crtgrpd.f90
-./hl/fortran/examples/exlite.f90
-./fortran/examples/h5_extend.f90
-./hl/fortran/examples/ex_ds1.f90
-./fortran/examples/h5_rdwt.f90
-./fortran/examples/h5_subset.f90
-./fortran/examples/hyperslab.f90
-./fortran/examples/mountexample.f90
-./fortran/examples/ph5example.f90
-./fortran/examples/refobjexample.f90
-./fortran/examples/refregexample.f90
-./fortran/examples/run-fortran-ex.sh.in
-./fortran/examples/selectele.f90
-./fortran/examples/testh5fc.sh.in
-./fortran/examples/nested_derived_type.f90
-./fortran/examples/rwdset_fortran2003.f90
-
-./fortran/src/H5_buildiface.F90
-./fortran/src/H5_f.c
-./fortran/src/H5_ff.F90
-./fortran/src/H5Af.c
-./fortran/src/H5Aff.F90
-./fortran/src/H5Df.c
-./fortran/src/H5Dff.F90
-./fortran/src/H5Ef.c
-./fortran/src/H5Eff.F90
-./fortran/src/H5Ff.c
-./fortran/src/H5Fff.F90
-./fortran/src/H5Gf.c
-./fortran/src/H5Gff.F90
-./fortran/src/H5If.c
-./fortran/src/H5Iff.F90
-./fortran/src/H5Lf.c
-./fortran/src/H5Lff.F90
-./fortran/src/H5Of.c
-./fortran/src/H5Off.F90
-./fortran/src/H5Pf.c
-./fortran/src/H5Pff.F90
-./fortran/src/H5Rf.c
-./fortran/src/H5Rff.F90
-./fortran/src/H5Sf.c
-./fortran/src/H5Sff.F90
-./fortran/src/H5Tf.c
-./fortran/src/H5Tff.F90
-./fortran/src/H5VLff.F90
-./fortran/src/H5Zf.c
-./fortran/src/H5Zff.F90
-./fortran/src/H5config_f.inc.cmake
-./fortran/src/H5config_f.inc.in
-./fortran/src/H5fort_type_defines.h.cmake
-./fortran/src/H5fort_type_defines.h.in
-./fortran/src/H5f90.h
-./fortran/src/H5f90global.F90
-./fortran/src/H5f90i.h
-./fortran/src/H5f90kit.c
-./fortran/src/H5fortkit.F90
-./fortran/src/H5f90proto.h
-./fortran/src/H5match_types.c
-./fortran/src/HDF5.F90
-./fortran/src/Makefile.am
-./fortran/src/README
-./fortran/src/h5fc.in
-./fortran/src/hdf5_fortrandll.def.in
-
-./fortran/test/Makefile.am
-./fortran/test/vol_connector.F90
-./fortran/test/fflush1.F90
-./fortran/test/fflush2.F90
-./fortran/test/fortranlib_test.F90
-./fortran/test/fortranlib_test_1_8.F90
-./fortran/test/fortranlib_test_F03.F90
-./fortran/test/t.c
-./fortran/test/t.h
-./fortran/test/tf.F90
-./fortran/test/tH5A.F90
-./fortran/test/tH5A_1_8.F90
-./fortran/test/tH5D.F90
-./fortran/test/tH5E_F03.F90
-./fortran/test/tH5E.F90
-./fortran/test/tH5F.F90
-./fortran/test/tH5F_F03.F90
-./fortran/test/tH5G.F90
-./fortran/test/tH5G_1_8.F90
-./fortran/test/tH5I.F90
-./fortran/test/tH5L_F03.F90
-./fortran/test/tH5MISC_1_8.F90
-./fortran/test/tH5O.F90
-./fortran/test/tH5O_F03.F90
-./fortran/test/tH5P_F03.F90
-./fortran/test/tH5P.F90
-./fortran/test/tH5R.F90
-./fortran/test/tH5S.F90
-./fortran/test/tH5Sselect.F90
-./fortran/test/tH5T_F03.F90
-./fortran/test/tH5T.F90
-./fortran/test/tH5VL.F90
-./fortran/test/tH5Z.F90
-./fortran/test/tHDF5_1_8.F90
-./fortran/test/tHDF5_F03.F90
-./fortran/test/tHDF5.F90
-./fortran/test/H5_test_buildiface.F90
-
-./fortran/testpar/Makefile.am
-./fortran/testpar/ptest.f90
-./fortran/testpar/hyper.f90
-./fortran/testpar/mdset.f90
-
-#------------------------------------------------------------------------------
-#
-# End Fortran interface
-#
-#------------------------------------------------------------------------------
-
-#------------------------------------------------------------------------------
-#
-# Begin C++ interface
-#
-#------------------------------------------------------------------------------
-
-./c++/Makefile.am
-
-./c++/examples/chunks.cpp
-./c++/examples/compound.cpp
-./c++/examples/create.cpp
-./c++/examples/expected.out
-./c++/examples/extend_ds.cpp
-./c++/examples/h5group.cpp
-./c++/examples/h5tutr_cmprss.cpp
-./c++/examples/h5tutr_crtatt.cpp
-./c++/examples/h5tutr_crtdat.cpp
-./c++/examples/h5tutr_crtgrpar.cpp
-./c++/examples/h5tutr_crtgrp.cpp
-./c++/examples/h5tutr_crtgrpd.cpp
-./c++/examples/h5tutr_extend.cpp
-./c++/examples/h5tutr_rdwt.cpp
-./c++/examples/h5tutr_subset.cpp
-./c++/examples/readdata.cpp
-./c++/examples/testh5c++.sh.in
-./c++/examples/writedata.cpp
-./c++/examples/Makefile.am
-./c++/examples/run-c++-ex.sh.in
-
-./c++/src/H5AbstractDs.cpp
-./c++/src/H5AbstractDs.h
-./c++/src/H5Alltypes.h
-./c++/src/H5ArrayType.cpp
-./c++/src/H5ArrayType.h
-./c++/src/H5AtomType.cpp
-./c++/src/H5AtomType.h
-./c++/src/H5Attribute.cpp
-./c++/src/H5Attribute.h
-./c++/src/H5Classes.h
-./c++/src/H5CommonFG.cpp
-./c++/src/H5CommonFG.h
-./c++/src/H5CompType.cpp
-./c++/src/H5CompType.h
-./c++/src/H5Cpp.h
-./c++/src/H5CppDoc.h
-./c++/src/H5DataSet.cpp
-./c++/src/H5DataSet.h
-./c++/src/H5DataSpace.cpp
-./c++/src/H5DataSpace.h
-./c++/src/H5DataType.cpp
-./c++/src/H5DataType.h
-./c++/src/H5DaccProp.cpp
-./c++/src/H5DaccProp.h
-./c++/src/H5DcreatProp.cpp
-./c++/src/H5DcreatProp.h
-./c++/src/H5DxferProp.cpp
-./c++/src/H5DxferProp.h
-./c++/src/H5EnumType.cpp
-./c++/src/H5EnumType.h
-./c++/src/H5Exception.cpp
-./c++/src/H5Exception.h
-./c++/src/H5FaccProp.cpp
-./c++/src/H5FaccProp.h
-./c++/src/H5FcreatProp.cpp
-./c++/src/H5FcreatProp.h
-./c++/src/H5File.cpp
-./c++/src/H5File.h
-./c++/src/H5FloatType.cpp
-./c++/src/H5FloatType.h
-./c++/src/H5Group.cpp
-./c++/src/H5Group.h
-./c++/src/H5IdComponent.cpp
-./c++/src/H5IdComponent.h
-./c++/src/H5Include.h
-./c++/src/H5IntType.cpp
-./c++/src/H5IntType.h
-./c++/src/H5LaccProp.cpp
-./c++/src/H5LaccProp.h
-./c++/src/H5LcreatProp.cpp
-./c++/src/H5LcreatProp.h
-./c++/src/H5Library.cpp
-./c++/src/H5Library.h
-./c++/src/H5Location.cpp
-./c++/src/H5Location.h
-./c++/src/H5Object.cpp
-./c++/src/H5Object.h
-./c++/src/H5OcreatProp.cpp
-./c++/src/H5OcreatProp.h
-./c++/src/H5PredType.cpp
-./c++/src/H5PredType.h
-./c++/src/H5PropList.cpp
-./c++/src/H5PropList.h
-./c++/src/H5StrType.cpp
-./c++/src/H5StrType.h
-./c++/src/H5VarLenType.cpp
-./c++/src/H5VarLenType.h
-./c++/src/Makefile.am
-./c++/src/RM_stylesheet.css
-./c++/src/C2Cppfunction_map.htm
-./c++/src/cpp_doc_config
-./c++/src/h5c++.in
-./c++/src/footer.html
-./c++/src/header.html
-./c++/src/header_files/filelist.xml
-./c++/src/header_files/hdf_logo.jpg
-./c++/src/header_files/help.jpg
-./c++/src/header_files/image001.jpg
-./c++/src/header_files/image002.jpg
-
-./c++/test/H5srcdir_str.h.in
-./c++/test/Makefile.am
-./c++/test/dsets.cpp
-./c++/test/h5cpputil.cpp
-./c++/test/h5cpputil.h
-./c++/test/tarray.cpp
-./c++/test/tattr.cpp
-./c++/test/tcompound.cpp
-./c++/test/tdspl.cpp
-./c++/test/testhdf5.cpp
-./c++/test/tfile.cpp
-./c++/test/tfilter.cpp
-./c++/test/th5s.cpp
-./c++/test/th5s.h5
-./c++/test/titerate.cpp
-./c++/test/tlinks.cpp
-./c++/test/tobject.cpp
-./c++/test/ttypes.cpp
-./c++/test/trefer.cpp
-./c++/test/tvlstr.cpp
-
-#------------------------------------------------------------------------------
-#
-# End C++ interface
-#
-#------------------------------------------------------------------------------
-
-./release_docs/HISTORY-1_0-1_8_0_rc3.txt
-./release_docs/HISTORY-1_8_0-1_10_0.txt
-./release_docs/HISTORY-1_10_0-1_12_0.txt
-./release_docs/HISTORY-1_8.txt
-./release_docs/HISTORY-1_10.txt
-./release_docs/HISTORY-1_12.txt
-./release_docs/INSTALL
-./release_docs/INSTALL_CMake.txt
-./release_docs/INSTALL_Cygwin.txt
-./release_docs/INSTALL_parallel
-./release_docs/INSTALL_Warnings.txt
-./release_docs/INSTALL_Windows.txt
-./release_docs/README_HDF5_CMake
-./release_docs/README_HPC
-./release_docs/RELEASE.txt
-./release_docs/USING_HDF5_CMake.txt
-./release_docs/USING_HDF5_VS.txt
-
-./src/.indent.pro _DO_NOT_DISTRIBUTE_
-./src/hdf5.lnt _DO_NOT_DISTRIBUTE_
-./src/hdf5-win.lnt _DO_NOT_DISTRIBUTE_
-./src/hdf5-lin.lnt _DO_NOT_DISTRIBUTE_
-./src/H5.c
-./src/H5checksum.c
-./src/H5dbg.c
-./src/H5api_adpt.h
-./src/H5err.txt
-./src/H5detect.c
-./src/H5make_libsettings.c
-./src/H5module.h
-./src/H5mpi.c
-./src/H5overflow.txt
-./src/H5private.h
-./src/H5public.h
-./src/H5system.c
-./src/H5timer.c
-./src/H5trace.c
-./src/H5vers.txt
-./src/H5A.c
-./src/H5Abtree2.c
-./src/H5Adense.c
-./src/H5Adeprec.c
-./src/H5Aint.c
-./src/H5Amodule.h
-./src/H5Atest.c
-./src/H5Apkg.h
-./src/H5Aprivate.h
-./src/H5Apublic.h
-./src/H5AC.c
-./src/H5ACdbg.c
-./src/H5ACmodule.h
-./src/H5ACmpio.c
-./src/H5ACpkg.h
-./src/H5ACprivate.h
-./src/H5ACpublic.h
-./src/H5ACproxy_entry.c
-./src/H5B.c
-./src/H5Bcache.c
-./src/H5Bdbg.c
-./src/H5Bmodule.h
-./src/H5Bpkg.h
-./src/H5Bprivate.h
-./src/H5B2.c
-./src/H5B2cache.c
-./src/H5B2dbg.c
-./src/H5B2hdr.c
-./src/H5B2int.c
-./src/H5B2internal.c
-./src/H5B2leaf.c
-./src/H5B2module.h
-./src/H5B2pkg.h
-./src/H5B2private.h
-./src/H5B2stat.c
-./src/H5B2test.c
-./src/H5C.c
-./src/H5Cdbg.c
-./src/H5Cepoch.c
-./src/H5Cimage.c
-./src/H5Clog.c
-./src/H5Clog.h
-./src/H5Clog_json.c
-./src/H5Clog_trace.c
-./src/H5Cmodule.h
-./src/H5Cmpio.c
-./src/H5Cpkg.h
-./src/H5Cprefetched.c
-./src/H5Cprivate.h
-./src/H5Cpublic.h
-./src/H5Cquery.c
-./src/H5Ctag.c
-./src/H5Ctest.c
-./src/H5CS.c
-./src/H5CSprivate.h
-./src/H5CX.c
-./src/H5CXmodule.h
-./src/H5CXprivate.h
-./src/H5D.c
-./src/H5Dbtree.c
-./src/H5Dbtree2.c
-./src/H5Dchunk.c
-./src/H5Dcompact.c
-./src/H5Dcontig.c
-./src/H5Ddbg.c
-./src/H5Ddeprec.c
-./src/H5Dearray.c
-./src/H5Defl.c
-./src/H5Dfarray.c
-./src/H5Dfill.c
-./src/H5Dint.c
-./src/H5Dio.c
-./src/H5Dlayout.c
-./src/H5Dmodule.h
-./src/H5Dmpio.c
-./src/H5Dnone.c
-./src/H5Doh.c
-./src/H5Dpkg.h
-./src/H5Dprivate.h
-./src/H5Dpublic.h
-./src/H5Dscatgath.c
-./src/H5Dselect.c
-./src/H5Dsingle.c
-./src/H5Dtest.c
-./src/H5Dvirtual.c
-./src/H5E.c
-./src/H5Edeprec.c
-./src/H5Eint.c
-./src/H5Emodule.h
-./src/H5Epkg.h
-./src/H5Eprivate.h
-./src/H5Epublic.h
-./src/H5EA.c
-./src/H5EAcache.c
-./src/H5EAdbg.c
-./src/H5EAdblkpage.c
-./src/H5EAdblock.c
-./src/H5EAhdr.c
-./src/H5EAiblock.c
-./src/H5EAint.c
-./src/H5EAmodule.h
-./src/H5EApkg.h
-./src/H5EAprivate.h
-./src/H5EAsblock.c
-./src/H5EAstat.c
-./src/H5EAtest.c
-./src/H5ES.c
-./src/H5ESdevelop.h
-./src/H5ESevent.c
-./src/H5ESint.c
-./src/H5ESlist.c
-./src/H5ESmodule.h
-./src/H5ESpkg.h
-./src/H5ESprivate.h
-./src/H5ESpublic.h
-./src/H5F.c
-./src/H5Faccum.c
-./src/H5Fcwfs.c
-./src/H5Fdbg.c
-./src/H5Fdeprec.c
-./src/H5Fefc.c
-./src/H5Ffake.c
-./src/H5Fint.c
-./src/H5Fio.c
-./src/H5Fmodule.h
-./src/H5Fmount.c
-./src/H5Fmpi.c
-./src/H5Fquery.c
-./src/H5Fsfile.c
-./src/H5Fspace.c
-./src/H5Fsuper.c
-./src/H5Fsuper_cache.c
-./src/H5Fpkg.h
-./src/H5Fprivate.h
-./src/H5Fpublic.h
-./src/H5Ftest.c
-./src/H5FA.c
-./src/H5FAcache.c
-./src/H5FAdbg.c
-./src/H5FAdblkpage.c
-./src/H5FAdblock.c
-./src/H5FAhdr.c
-./src/H5FAint.c
-./src/H5FAmodule.h
-./src/H5FApkg.h
-./src/H5FAprivate.h
-./src/H5FAstat.c
-./src/H5FAtest.c
-./src/H5FD.c
-./src/H5FDcore.c
-./src/H5FDcore.h
-./src/H5FDdevelop.h
-./src/H5FDdirect.c
-./src/H5FDdirect.h
-./src/H5FDdrvr_module.h
-./src/H5FDfamily.c
-./src/H5FDfamily.h
-./src/H5FDhdfs.c
-./src/H5FDhdfs.h
-./src/H5FDint.c
-./src/H5FDlog.c
-./src/H5FDlog.h
-./src/H5FDmirror.c
-./src/H5FDmirror.h
-./src/H5FDmirror_priv.h
-./src/H5FDmodule.h
-./src/H5FDmpi.c
-./src/H5FDmpi.h
-./src/H5FDmpio.c
-./src/H5FDmpio.h
-./src/H5FDmulti.c
-./src/H5FDmulti.h
-./src/H5FDperform.c
-./src/H5FDros3.c
-./src/H5FDros3.h
-./src/H5FDpkg.h
-./src/H5FDprivate.h
-./src/H5FDpublic.h
-./src/H5FDs3comms.h
-./src/H5FDs3comms.c
-./src/H5FDsec2.c
-./src/H5FDsec2.h
-./src/H5FDspace.c
-./src/H5FDsplitter.c
-./src/H5FDsplitter.h
-./src/H5FDstdio.c
-./src/H5FDstdio.h
-./src/H5FDtest.c
-./src/H5FDwindows.c
-./src/H5FDwindows.h
-./src/H5FL.c
-./src/H5FLmodule.h
-./src/H5FLprivate.h
-./src/H5FO.c
-./src/H5FOprivate.h
-./src/H5FS.c
-./src/H5FScache.c
-./src/H5FSdbg.c
-./src/H5FSint.c
-./src/H5FSmodule.h
-./src/H5FSpkg.h
-./src/H5FSprivate.h
-./src/H5FSsection.c
-./src/H5FSstat.c
-./src/H5FStest.c
-./src/H5G.c
-./src/H5Gbtree2.c
-./src/H5Gcache.c
-./src/H5Gcompact.c
-./src/H5Gdense.c
-./src/H5Gdeprec.c
-./src/H5Gent.c
-./src/H5Gint.c
-./src/H5Glink.c
-./src/H5Gloc.c
-./src/H5Gmodule.h
-./src/H5Gname.c
-./src/H5Gnode.c
-./src/H5Gobj.c
-./src/H5Goh.c
-./src/H5Gpkg.h
-./src/H5Gprivate.h
-./src/H5Gpublic.h
-./src/H5Groot.c
-./src/H5Gstab.c
-./src/H5Gtest.c
-./src/H5Gtraverse.c
-./src/H5HF.c
-./src/H5HFbtree2.c
-./src/H5HFcache.c
-./src/H5HFdbg.c
-./src/H5HFdblock.c
-./src/H5HFdtable.c
-./src/H5HFhdr.c
-./src/H5HFhuge.c
-./src/H5HFiblock.c
-./src/H5HFiter.c
-./src/H5HFman.c
-./src/H5HFmodule.h
-./src/H5HFpkg.h
-./src/H5HFprivate.h
-./src/H5HFsection.c
-./src/H5HFspace.c
-./src/H5HFstat.c
-./src/H5HFtest.c
-./src/H5HFtiny.c
-./src/H5HG.c
-./src/H5HGcache.c
-./src/H5HGdbg.c
-./src/H5HGmodule.h
-./src/H5HGpkg.h
-./src/H5HGprivate.h
-./src/H5HGquery.c
-./src/H5HL.c
-./src/H5HLcache.c
-./src/H5HLdbg.c
-./src/H5HLdblk.c
-./src/H5HLint.c
-./src/H5HLmodule.h
-./src/H5HLpkg.h
-./src/H5HLprfx.c
-./src/H5HLprivate.h
-./src/H5HP.c
-./src/H5HPprivate.h
-./src/H5I.c
-./src/H5Idbg.c
-./src/H5Idevelop.h
-./src/H5Iint.c
-./src/H5Imodule.h
-./src/H5Ipkg.h
-./src/H5Iprivate.h
-./src/H5Ipublic.h
-./src/H5Itest.c
-./src/H5L.c
-./src/H5Ldeprec.c
-./src/H5Ldevelop.h
-./src/H5Lexternal.c
-./src/H5Lint.c
-./src/H5Lmodule.h
-./src/H5Lpkg.h
-./src/H5Lprivate.h
-./src/H5Lpublic.h
-./src/H5M.c
-./src/H5Mmodule.h
-./src/H5Mpkg.h
-./src/H5Mprivate.h
-./src/H5Mpublic.h
-./src/H5MF.c
-./src/H5MFaggr.c
-./src/H5MFdbg.c
-./src/H5MFmodule.h
-./src/H5MFsection.c
-./src/H5MFpkg.h
-./src/H5MFprivate.h
-./src/H5MM.c
-./src/H5MMprivate.h
-./src/H5MMpublic.h
-./src/H5MP.c
-./src/H5MPmodule.h
-./src/H5MPpkg.h
-./src/H5MPprivate.h
-./src/H5MPtest.c
-./src/H5O.c
-./src/H5Oainfo.c
-./src/H5Oalloc.c
-./src/H5Oattr.c
-./src/H5Oattribute.c
-./src/H5Obogus.c
-./src/H5Obtreek.c
-./src/H5Ocache.c
-./src/H5Ocache_image.c
-./src/H5Ochunk.c
-./src/H5Ocont.c
-./src/H5Ocopy.c
-./src/H5Ocopy_ref.c
-./src/H5Odbg.c
-./src/H5Odeprec.c
-./src/H5Odrvinfo.c
-./src/H5Odtype.c
-./src/H5Oefl.c
-./src/H5Ofill.c
-./src/H5Oflush.c
-./src/H5Ofsinfo.c
-./src/H5Oginfo.c
-./src/H5Oint.c
-./src/H5Olayout.c
-./src/H5Olinfo.c
-./src/H5Olink.c
-./src/H5Omessage.c
-./src/H5Omodule.h
-./src/H5Omtime.c
-./src/H5Oname.c
-./src/H5Onull.c
-./src/H5Opkg.h
-./src/H5Opline.c
-./src/H5Oprivate.h
-./src/H5Opublic.h
-./src/H5Orefcount.c
-./src/H5Osdspace.c
-./src/H5Oshared.c
-./src/H5Oshared.h
-./src/H5Oshmesg.c
-./src/H5Ostab.c
-./src/H5Otest.c
-./src/H5Ounknown.c
-./src/H5P.c
-./src/H5Pacpl.c
-./src/H5Pdapl.c
-./src/H5Pdcpl.c
-./src/H5Pdeprec.c
-./src/H5Pdxpl.c
-./src/H5Pencdec.c
-./src/H5Pfapl.c
-./src/H5Pfcpl.c
-./src/H5Pfmpl.c
-./src/H5Pgcpl.c
-./src/H5Pint.c
-./src/H5Plapl.c
-./src/H5Plcpl.c
-./src/H5Pmapl.c
-./src/H5Pmcpl.c
-./src/H5Pmodule.h
-./src/H5Pocpl.c
-./src/H5Pocpypl.c
-./src/H5Ppkg.h
-./src/H5Pprivate.h
-./src/H5Ppublic.h
-./src/H5Pstrcpl.c
-./src/H5Ptest.c
-./src/H5PB.c
-./src/H5PBmodule.h
-./src/H5PBpkg.h
-./src/H5PBprivate.h
-./src/H5PL.c
-./src/H5PLint.c
-./src/H5PLmodule.h
-./src/H5PLpath.c
-./src/H5PLpkg.h
-./src/H5PLplugin_cache.c
-./src/H5PLprivate.h
-./src/H5PLpublic.h
-./src/H5PLextern.h
-./src/H5R.c
-./src/H5Rdeprec.c
-./src/H5Rint.c
-./src/H5Rmodule.h
-./src/H5Rpkg.h
-./src/H5Rprivate.h
-./src/H5Rpublic.h
-./src/H5RS.c
-./src/H5RSmodule.h
-./src/H5RSprivate.h
-./src/H5S.c
-./src/H5Sall.c
-./src/H5Sdbg.c
-./src/H5Sdeprec.c
-./src/H5Shyper.c
-./src/H5Smodule.h
-./src/H5Smpio.c
-./src/H5Snone.c
-./src/H5Spkg.h
-./src/H5Spoint.c
-./src/H5Sprivate.h
-./src/H5Spublic.h
-./src/H5Sselect.c
-./src/H5Stest.c
-./src/H5SL.c
-./src/H5SLmodule.h
-./src/H5SLprivate.h
-./src/H5SM.c
-./src/H5SMbtree2.c
-./src/H5SMcache.c
-./src/H5SMmessage.c
-./src/H5SMmodule.h
-./src/H5SMpkg.h
-./src/H5SMprivate.h
-./src/H5SMtest.c
-./src/H5T.c
-./src/H5Tarray.c
-./src/H5Tbit.c
-./src/H5Tcommit.c
-./src/H5Tcompound.c
-./src/H5Tconv.c
-./src/H5Tcset.c
-./src/H5Tdbg.c
-./src/H5Tdeprec.c
-./src/H5Tdevelop.h
-./src/H5Tenum.c
-./src/H5Tfields.c
-./src/H5Tfixed.c
-./src/H5Tfloat.c
-./src/H5Tmodule.h
-./src/H5Tnative.c
-./src/H5Toffset.c
-./src/H5Topaque.c
-./src/H5Torder.c
-./src/H5Toh.c
-./src/H5Tpad.c
-./src/H5Tpkg.h
-./src/H5Tprecis.c
-./src/H5Tprivate.h
-./src/H5Tpublic.h
-./src/H5Tref.c
-./src/H5Tstrpad.c
-./src/H5Tvisit.c
-./src/H5Tvlen.c
-./src/H5TS.c
-./src/H5TSdevelop.h
-./src/H5TSprivate.h
-./src/H5UC.c
-./src/H5UCprivate.h
-./src/H5VL.c
-./src/H5VLcallback.c
-./src/H5VLconnector.h
-./src/H5VLconnector_passthru.h
-./src/H5VLdyn_ops.c
-./src/H5VLint.c
-./src/H5VLmodule.h
-./src/H5VLnative.c
-./src/H5VLnative.h
-./src/H5VLnative_attr.c
-./src/H5VLnative_blob.c
-./src/H5VLnative_dataset.c
-./src/H5VLnative_datatype.c
-./src/H5VLnative_file.c
-./src/H5VLnative_group.c
-./src/H5VLnative_link.c
-./src/H5VLnative_introspect.c
-./src/H5VLnative_object.c
-./src/H5VLnative_token.c
-./src/H5VLnative_private.h
-./src/H5VLpassthru.c
-./src/H5VLpassthru.h
-./src/H5VLpkg.h
-./src/H5VLprivate.h
-./src/H5VLpublic.h
-./src/H5VLtest.c
-./src/H5VM.c
-./src/H5VMprivate.h
-./src/H5WB.c
-./src/H5WBprivate.h
-./src/H5Z.c
-./src/H5Zdeflate.c
-./src/H5Zdevelop.h
-./src/H5Zfletcher32.c
-./src/H5Zmodule.h
-./src/H5Znbit.c
-./src/H5Zpkg.h
-./src/H5Zprivate.h
-./src/H5Zpublic.h
-./src/H5Zscaleoffset.c
-./src/H5Zshuffle.c
-./src/H5Zszip.c
-./src/H5Ztrans.c
-./src/Makefile.am
-./src/hdf5.h
-./src/libhdf5.settings.in
-./src/H5win32defs.h
-./src/uthash.h
-
-./test/AtomicWriterReader.txt
-./test/H5srcdir.h
-./test/H5srcdir_str.h.in
-./test/Makefile.am
-./test/POSIX_Order_Write_Test_Report.docx
-./test/POSIX_Order_Write_Test_Report.pdf
-./test/SWMR_POSIX_Order_UG.txt
-./test/SWMR_UseCase_UG.txt
-./test/accum.c
-./test/accum_swmr_reader.c
-./test/aggr.h5
-./test/app_ref.c
-./test/atomic_reader.c
-./test/atomic_writer.c
-./test/bad_compound.h5
-./test/bad_offset.h5
-./test/be_data.h5
-./test/be_extlink1.h5
-./test/be_extlink2.h5
-./test/big.c
-./test/bittests.c
-./test/btree2.c
-./test/btree_idx_1_6.h5
-./test/btree_idx_1_8.h5
-./test/cache.c
-./test/cache_api.c
-./test/cache_common.c
-./test/cache_common.h
-./test/cache_image.c
-./test/cache_logging.c
-./test/cache_tagging.c
-./test/chunk_info.c
-./test/cmpd_dset.c
-./test/cmpd_dtransform.c
-./test/cork.c
-./test/corrupt_stab_msg.h5
-./test/cross_read.c
-./test/cve_2020_10810.h5
-./test/dangle.c
-./test/deflate.h5
-./test/del_many_dense_attrs.c
-./test/direct_chunk.c
-./test/dsets.c
-./test/dt_arith.c
-./test/dtransform.c
-./test/dtypes.c
-./test/earray.c
-./test/efc.c
-./test/enc_dec_plist.c
-./test/enc_dec_plist_cross_platform.c
-./test/enum.c
-./test/err_compat.c
-./test/error_test.c
-./test/event_set.c
-./test/evict_on_close.c
-./test/extend.c
-./test/external.c
-./test/external_common.c
-./test/external_common.h
-./test/external_env.c
-./test/external_fname.h
-./test/family_v16-000000.h5
-./test/family_v16-000001.h5
-./test/family_v16-000002.h5
-./test/family_v16-000003.h5
-./test/farray.c
-./test/fheap.c
-./test/file_image.c
-./test/file_image_core_test.h5
-./test/filenotclosed.c
-./test/filespace_1_6.h5
-./test/filespace_1_8.h5
-./test/fill18.h5
-./test/fill_old.h5
-./test/fillval.c
-./test/filter_error.h5
-./test/filter_fail.c
-./test/filter_plugin.c
-./test/filter_plugin1_dsets.c
-./test/filter_plugin2_dsets.c
-./test/filter_plugin3_dsets.c
-./test/filter_plugin4_groups.c
-./test/flush1.c
-./test/flush2.c
-./test/flushrefresh.c
-./test/freespace.c
-./test/fsm_aggr_nopersist.h5
-./test/fsm_aggr_persist.h5
-./test/gen_bad_compound.c
-./test/gen_bad_offset.c
-./test/gen_bad_ohdr.c
-./test/gen_bogus.c
-./test/gen_bounds.c
-./test/gen_cross.c
-./test/gen_deflate.c
-./test/gen_file_image.c
-./test/gen_filespace.c
-./test/gen_filters.c
-./test/gen_mergemsg.c
-./test/gen_new_array.c
-./test/gen_new_fill.c
-./test/gen_new_group.c
-./test/gen_new_mtime.c
-./test/gen_new_super.c
-./test/gen_noencoder.c
-./test/gen_nullspace.c
-./test/gen_old_array.c
-./test/gen_old_group.c
-./test/gen_old_layout.c
-./test/gen_old_mtime.c
-./test/gen_plist.c
-./test/gen_sizes_lheap.c
-./test/gen_specmetaread.c
-./test/gen_udlinks.c
-./test/genall5.c
-./test/genall5.h
-./test/getname.c
-./test/gheap.c
-./test/group_old.h5
-./test/h5fc_ext1_f.h5
-./test/h5fc_ext1_i.h5
-./test/h5fc_ext2_if.h5
-./test/h5fc_ext2_sf.h5
-./test/h5fc_ext3_isf.h5
-./test/h5fc_ext_none.h5
-./test/h5test.c
-./test/h5test.h
-./test/hdfs.c
-./test/hyperslab.c
-./test/istore.c
-./test/le_data.h5
-./test/le_extlink1.h5
-./test/le_extlink2.h5
-./test/lheap.c
-./test/links.c
-./test/links_env.c
-./test/memleak_H5O_dtype_decode_helper_H5Odtype.h5
-./test/mergemsg.h5
-./test/mf.c
-./test/mirror_vfd.c
-./test/mount.c
-./test/mtime.c
-./test/multi_file_v16-r.h5
-./test/multi_file_v16-s.h5
-./test/noencoder.h5
-./test/none.h5
-./test/ntypes.c
-./test/null_vol_connector.c
-./test/null_vol_connector.h
-./test/null_vfd_plugin.c
-./test/null_vfd_plugin.h
-./test/objcopy.c
-./test/objcopy_ref.c
-./test/ohdr.c
-./test/page_buffer.c
-./test/paged_nopersist.h5
-./test/paged_persist.h5
-./test/pool.c
-./test/reserved.c
-./test/ros3.c
-./test/s3comms.c
-./test/set_extent.c
-# ====distribute this for now. See HDFFV-8236====
-./test/space_overflow.c
-# ====end distribute this for now. See HDFFV-8236====
-./test/specmetaread.h5
-./test/stab.c
-./test/swmr.c
-./test/swmr_addrem_writer.c
-./test/swmr_common.c
-./test/swmr_common.h
-./test/swmr_generator.c
-./test/swmr_reader.c
-./test/swmr_remove_reader.c
-./test/swmr_remove_writer.c
-./test/swmr_sparse_reader.c
-./test/swmr_sparse_writer.c
-./test/swmr_start_write.c
-./test/swmr_writer.c
-./test/tarray.c
-./test/tarrold.h5
-./test/tattr.c
-./test/tbad_msg_count.h5
-./test/tbogus.h5
-./test/tcheck_version.c
-./test/tchecksum.c
-./test/tconfig.c
-./test/tcoords.c
-./test/test_filter_plugin.sh.in
-./test/test_filters_be.h5
-./test/test_filters_le.h5
-./test/test_usecases.sh.in
-./test/test_vol_plugin.sh.in
-./test/testabort_fail.sh.in
-./test/testcheck_version.sh.in
-./test/testerror.sh.in
-./test/testexternal_env.sh.in
-./test/testflushrefresh.sh.in
-./test/testframe.c
-./test/testhdf5.c
-./test/testhdf5.h
-./test/testlibinfo.sh.in
-./test/testlinks_env.sh.in
-./test/testmeta.c
-./test/test_mirror.sh.in
-./test/testswmr.pwsh.in
-./test/testswmr.sh.in
-./test/testvds_env.sh.in
-./test/testvdsswmr.pwsh.in
-./test/testvdsswmr.sh.in
-./test/tfile.c
-./test/tgenprop.c
-./test/th5o.c
-./test/th5s.c
-./test/th5s.h5
-./test/theap.c
-./test/thread_id.c
-./test/tid.c
-./test/timer.c
-./test/titerate.c
-./test/tlayouto.h5
-./test/tmeta.c
-./test/tmisc.c
-./test/tmtimen.h5
-./test/tmtimeo.h5
-./test/trefer.c
-./test/trefer_deprec.c
-./test/trefer_shutdown.c
-./test/trefstr.c
-./test/tselect.c
-./test/tsizeslheap.h5
-./test/tskiplist.c
-./test/tsohm.c
-./test/ttime.c
-./test/ttsafe.c
-./test/ttsafe.h
-./test/ttsafe_acreate.c
-./test/ttsafe_attr_vlen.c
-./test/ttsafe_cancel.c
-./test/ttsafe_dcreate.c
-./test/ttsafe_error.c
-./test/tunicode.c
-./test/tvlstr.c
-./test/tvltypes.c
-./test/twriteorder.c
-./test/unlink.c
-./test/unregister.c
-./test/use.h
-./test/use_append_chunk.c
-./test/use_append_chunk_mirror.c
-./test/use_append_mchunks.c
-./test/use_common.c
-./test/use_disable_mdc_flushes.c
-./test/vds.c
-./test/vds_env.c
-./test/vds_swmr.h
-./test/vds_swmr_gen.c
-./test/vds_swmr_reader.c
-./test/vds_swmr_writer.c
-./test/vfd.c
-./test/vfd_plugin.c
-./test/vol.c
-./test/vol_plugin.c
-
-./test/testfiles/err_compat_1
-./test/testfiles/err_compat_2
-./test/testfiles/error_test_1
-./test/testfiles/error_test_2
-./test/testfiles/links_env.out
-./test/testfiles/plist_files/acpl_32be
-./test/testfiles/plist_files/acpl_32le
-./test/testfiles/plist_files/acpl_64be
-./test/testfiles/plist_files/acpl_64le
-./test/testfiles/plist_files/dapl_32be
-./test/testfiles/plist_files/dapl_32le
-./test/testfiles/plist_files/dapl_64be
-./test/testfiles/plist_files/dapl_64le
-./test/testfiles/plist_files/dcpl_32be
-./test/testfiles/plist_files/dcpl_32le
-./test/testfiles/plist_files/dcpl_64be
-./test/testfiles/plist_files/dcpl_64le
-./test/testfiles/plist_files/def_acpl_32be
-./test/testfiles/plist_files/def_acpl_32le
-./test/testfiles/plist_files/def_acpl_64be
-./test/testfiles/plist_files/def_acpl_64le
-./test/testfiles/plist_files/def_dapl_32be
-./test/testfiles/plist_files/def_dapl_32le
-./test/testfiles/plist_files/def_dapl_64be
-./test/testfiles/plist_files/def_dapl_64le
-./test/testfiles/plist_files/def_dcpl_32be
-./test/testfiles/plist_files/def_dcpl_32le
-./test/testfiles/plist_files/def_dcpl_64be
-./test/testfiles/plist_files/def_dcpl_64le
-./test/testfiles/plist_files/def_dxpl_32be
-./test/testfiles/plist_files/def_dxpl_32le
-./test/testfiles/plist_files/def_dxpl_64be
-./test/testfiles/plist_files/def_dxpl_64le
-./test/testfiles/plist_files/def_fapl_32be
-./test/testfiles/plist_files/def_fapl_32le
-./test/testfiles/plist_files/def_fapl_64be
-./test/testfiles/plist_files/def_fapl_64le
-./test/testfiles/plist_files/def_fcpl_32be
-./test/testfiles/plist_files/def_fcpl_32le
-./test/testfiles/plist_files/def_fcpl_64be
-./test/testfiles/plist_files/def_fcpl_64le
-./test/testfiles/plist_files/def_gcpl_32be
-./test/testfiles/plist_files/def_gcpl_32le
-./test/testfiles/plist_files/def_gcpl_64be
-./test/testfiles/plist_files/def_gcpl_64le
-./test/testfiles/plist_files/def_lapl_32be
-./test/testfiles/plist_files/def_lapl_32le
-./test/testfiles/plist_files/def_lapl_64be
-./test/testfiles/plist_files/def_lapl_64le
-./test/testfiles/plist_files/def_lcpl_32be
-./test/testfiles/plist_files/def_lcpl_32le
-./test/testfiles/plist_files/def_lcpl_64be
-./test/testfiles/plist_files/def_lcpl_64le
-./test/testfiles/plist_files/def_ocpl_32be
-./test/testfiles/plist_files/def_ocpl_32le
-./test/testfiles/plist_files/def_ocpl_64be
-./test/testfiles/plist_files/def_ocpl_64le
-./test/testfiles/plist_files/def_ocpypl_32be
-./test/testfiles/plist_files/def_ocpypl_32le
-./test/testfiles/plist_files/def_ocpypl_64be
-./test/testfiles/plist_files/def_ocpypl_64le
-./test/testfiles/plist_files/def_strcpl_32be
-./test/testfiles/plist_files/def_strcpl_32le
-./test/testfiles/plist_files/def_strcpl_64be
-./test/testfiles/plist_files/def_strcpl_64le
-./test/testfiles/plist_files/dxpl_32be
-./test/testfiles/plist_files/dxpl_32le
-./test/testfiles/plist_files/dxpl_64be
-./test/testfiles/plist_files/dxpl_64le
-./test/testfiles/plist_files/fapl_32be
-./test/testfiles/plist_files/fapl_32le
-./test/testfiles/plist_files/fapl_64be
-./test/testfiles/plist_files/fapl_64le
-./test/testfiles/plist_files/fcpl_32be
-./test/testfiles/plist_files/fcpl_32le
-./test/testfiles/plist_files/fcpl_64be
-./test/testfiles/plist_files/fcpl_64le
-./test/testfiles/plist_files/gcpl_32be
-./test/testfiles/plist_files/gcpl_32le
-./test/testfiles/plist_files/gcpl_64be
-./test/testfiles/plist_files/gcpl_64le
-./test/testfiles/plist_files/lapl_32be
-./test/testfiles/plist_files/lapl_32le
-./test/testfiles/plist_files/lapl_64be
-./test/testfiles/plist_files/lapl_64le
-./test/testfiles/plist_files/lcpl_32be
-./test/testfiles/plist_files/lcpl_32le
-./test/testfiles/plist_files/lcpl_64be
-./test/testfiles/plist_files/lcpl_64le
-./test/testfiles/plist_files/ocpl_32be
-./test/testfiles/plist_files/ocpl_32le
-./test/testfiles/plist_files/ocpl_64be
-./test/testfiles/plist_files/ocpl_64le
-./test/testfiles/plist_files/ocpypl_32be
-./test/testfiles/plist_files/ocpypl_32le
-./test/testfiles/plist_files/ocpypl_64be
-./test/testfiles/plist_files/ocpypl_64le
-./test/testfiles/plist_files/strcpl_32be
-./test/testfiles/plist_files/strcpl_32le
-./test/testfiles/plist_files/strcpl_64be
-./test/testfiles/plist_files/strcpl_64le
-
-./testpar/Makefile.am
-./testpar/t_bigio.c
-./testpar/t_cache.c
-./testpar/t_cache_image.c
-./testpar/t_chunk_alloc.c
-./testpar/t_coll_chunk.c
-./testpar/t_coll_md_read.c
-./testpar/t_dset.c
-./testpar/t_file.c
-./testpar/t_file_image.c
-./testpar/t_filter_read.c
-./testpar/t_filters_parallel.c
-./testpar/t_filters_parallel.h
-./testpar/t_mdset.c
-./testpar/t_mpi.c
-./testpar/t_ph5basic.c
-./testpar/t_pflush1.c
-./testpar/t_pflush2.c
-./testpar/t_pread.c
-./testpar/t_prop.c
-./testpar/t_shapesame.c
-./testpar/t_pshutdown.c
-./testpar/t_prestart.c
-./testpar/t_span_tree.c
-./testpar/t_init_term.c
-./testpar/t_2Gio.c
-./testpar/testpar.h
-./testpar/testpflush.sh.in
-./testpar/testphdf5.c
-./testpar/testphdf5.h
-
-./tools/Makefile.am
-./tools/src/Makefile.am
-./tools/test/Makefile.am
-
-./tools/src/h5dump/Makefile.am
-./tools/src/h5dump/h5dump.c
-./tools/src/h5dump/h5dump.h
-./tools/src/h5dump/h5dump_defines.h
-./tools/src/h5dump/h5dump_extern.h
-./tools/src/h5dump/h5dump_ddl.c
-./tools/src/h5dump/h5dump_ddl.h
-./tools/src/h5dump/h5dump_xml.c
-./tools/src/h5dump/h5dump_xml.h
-./tools/test/h5dump/Makefile.am
-./tools/test/h5dump/dynlib_dump.c
-./tools/test/h5dump/h5dumpgentest.c
-./tools/test/h5dump/h5dump_plugin.sh.in
-./tools/test/h5dump/testh5dump.sh.in
-./tools/test/h5dump/testh5dumppbits.sh.in
-./tools/test/h5dump/testh5dumpxml.sh.in
-./tools/test/h5dump/testh5dumpvds.sh.in
-./tools/test/h5dump/binread.c
-
-./tools/src/h5import/Makefile.am
-./tools/src/h5import/h5import.h
-./tools/src/h5import/h5import.c
-./tools/test/h5import/Makefile.am
-./tools/test/h5import/h5importtest.c
-./tools/test/h5import/h5importtestutil.sh.in
-
-# testfiles for h5import
-./tools/test/h5import/testfiles/binfp64.h5
-./tools/test/h5import/testfiles/binin16.h5
-./tools/test/h5import/testfiles/binin32.h5
-./tools/test/h5import/testfiles/binin8.h5
-./tools/test/h5import/testfiles/binin8w.h5
-./tools/test/h5import/testfiles/binuin16.h5
-./tools/test/h5import/testfiles/binuin32.h5
-./tools/test/h5import/testfiles/tall_fp32.ddl
-./tools/test/h5import/testfiles/tall_i32.ddl
-./tools/test/h5import/testfiles/tintsattrs_u32.ddl
-./tools/test/h5import/testfiles/textpfe.conf
-./tools/test/h5import/testfiles/textpfe.h5
-./tools/test/h5import/testfiles/textpfe64.txt
-./tools/test/h5import/testfiles/txtfp32.conf
-./tools/test/h5import/testfiles/txtfp32.h5
-./tools/test/h5import/testfiles/txtfp32.txt
-./tools/test/h5import/testfiles/txtfp64.conf
-./tools/test/h5import/testfiles/txtfp64.h5
-./tools/test/h5import/testfiles/txtfp64.txt
-./tools/test/h5import/testfiles/txtin16.conf
-./tools/test/h5import/testfiles/txtin16.h5
-./tools/test/h5import/testfiles/txtin16.txt
-./tools/test/h5import/testfiles/txtin32.conf
-./tools/test/h5import/testfiles/txtin32.h5
-./tools/test/h5import/testfiles/txtin32.txt
-./tools/test/h5import/testfiles/txtin8.conf
-./tools/test/h5import/testfiles/txtin8.h5
-./tools/test/h5import/testfiles/txtin8.txt
-./tools/test/h5import/testfiles/txtuin16.conf
-./tools/test/h5import/testfiles/txtuin16.h5
-./tools/test/h5import/testfiles/txtuin16.txt
-./tools/test/h5import/testfiles/txtuin32.conf
-./tools/test/h5import/testfiles/txtuin32.h5
-./tools/test/h5import/testfiles/txtuin32.txt
-./tools/test/h5import/testfiles/txtstr.conf
-./tools/test/h5import/testfiles/txtstr.h5
-./tools/test/h5import/testfiles/txtstr.txt
-./tools/test/h5import/testfiles/dbinfp64.h5.txt
-./tools/test/h5import/testfiles/dbinin8.h5.txt
-./tools/test/h5import/testfiles/dbinin8w.h5.txt
-./tools/test/h5import/testfiles/dbinin16.h5.txt
-./tools/test/h5import/testfiles/dbinin32.h5.txt
-./tools/test/h5import/testfiles/dbinuin16.h5.txt
-./tools/test/h5import/testfiles/dbinuin32.h5.txt
-./tools/test/h5import/testfiles/dtxtstr.h5.txt
-
-# h5diff sources
-./tools/src/h5diff/Makefile.am
-./tools/src/h5diff/h5diff_common.c
-./tools/src/h5diff/h5diff_common.h
-./tools/src/h5diff/h5diff_main.c
-./tools/src/h5diff/ph5diff_main.c
-./tools/test/h5diff/Makefile.am
-./tools/test/h5diff/dynlib_diff.c
-./tools/test/h5diff/h5diffgentest.c
-./tools/test/h5diff/h5diff_plugin.sh.in
-./tools/test/h5diff/testh5diff.sh.in
-./tools/test/h5diff/testph5diff.sh.in
-
-# h5format_convert sources
-./tools/src/h5format_convert/Makefile.am
-./tools/src/h5format_convert/h5format_convert.c
-./tools/test/h5format_convert/Makefile.am
-./tools/test/h5format_convert/h5fc_chk_idx.c
-./tools/test/h5format_convert/h5fc_gentest.c
-./tools/test/h5format_convert/testfiles/h5fc_v_n_all.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_bt1.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_err.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_err.ddl.err
-./tools/test/h5format_convert/testfiles/h5fc_v_non_chunked.ddl
-./tools/test/h5format_convert/testfiles/h5fc_d_file.ddl
-./tools/test/h5format_convert/testfiles/h5fc_d_file.ddl.err
-./tools/test/h5format_convert/testfiles/h5fc_d_file-d.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_ndata_bt1.ddl
-./tools/test/h5format_convert/testfiles/h5fc_dname.ddl
-./tools/test/h5format_convert/testfiles/h5fc_dname.err
-./tools/test/h5format_convert/testfiles/h5fc_nonexistfile.ddl.err
-./tools/test/h5format_convert/testfiles/h5fc_nonexistdset_file.ddl.err
-./tools/test/h5format_convert/testfiles/h5fc_help.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_all.ddl
-./tools/test/h5format_convert/testfiles/h5fc_nooption.ddl
-./tools/test/h5format_convert/testfiles/h5fc_v_n_1d.ddl
-./tools/test/h5format_convert/testfiles/h5fc_non_v3.h5
-./tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5
-./tools/test/h5format_convert/testfiles/h5fc_err_level.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext_none.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5
-./tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5
-./tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl
-./tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl
-./tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl
-./tools/test/h5format_convert/testh5fc.sh.in
-
-# h5repack sources
-./tools/src/h5repack/Makefile.am
-./tools/src/h5repack/h5repack.c
-./tools/src/h5repack/h5repack.h
-./tools/src/h5repack/h5repack_copy.c
-./tools/src/h5repack/h5repack_refs.c
-./tools/src/h5repack/h5repack_filters.c
-./tools/src/h5repack/h5repack_main.c
-./tools/src/h5repack/h5repack_opttable.c
-./tools/src/h5repack/h5repack_parse.c
-./tools/src/h5repack/h5repack_verify.c
-./tools/test/h5repack/Makefile.am
-./tools/test/h5repack/dynlib_rpk.c
-./tools/test/h5repack/dynlib_vrpk.c
-./tools/test/h5repack/h5repack.sh.in
-./tools/test/h5repack/h5repackgentest.c
-./tools/test/h5repack/h5repack_plugin.sh.in
-./tools/test/h5repack/h5repacktst.c
-./tools/test/h5repack/testh5repack_detect_szip.c
-
-# h5ls sources
-./tools/src/h5ls/Makefile.am
-./tools/src/h5ls/h5ls.c
-./tools/test/h5ls/Makefile.am
-./tools/test/h5ls/dynlib_ls.c
-./tools/test/h5ls/h5ls_plugin.sh.in
-./tools/test/h5ls/testh5ls.sh.in
-./tools/test/h5ls/testh5lsvds.sh.in
-./tools/test/h5ls/vds_prefix/tvds-1.ls
-./tools/test/h5ls/vds_prefix/tvds-2.ls
-./tools/test/h5ls/vds_prefix/tvds-3_1.ls
-./tools/test/h5ls/vds_prefix/tvds-3_2.ls
-./tools/test/h5ls/vds_prefix/tvds-4.ls
-./tools/test/h5ls/vds_prefix/tvds-5.ls
-
-# h5ls test error files
-./tools/test/h5ls/errfiles/nosuchfile.err
-./tools/test/h5ls/errfiles/textlinksrc-nodangle-1.err
-./tools/test/h5ls/errfiles/tgroup-1.err
-
-# h5copy sources
-./tools/src/h5copy/Makefile.am
-./tools/src/h5copy/h5copy.c
-./tools/test/h5copy/Makefile.am
-./tools/test/h5copy/h5copygentest.c
-./tools/test/h5copy/testh5copy.sh.in
-./tools/test/h5copy/dynlib_copy.c
-
-
-./tools/lib/Makefile.am
-./tools/lib/h5diff.c
-./tools/lib/h5diff.h
-./tools/lib/h5diff_array.c
-./tools/lib/h5diff_attr.c
-./tools/lib/h5diff_dset.c
-./tools/lib/h5diff_util.c
-./tools/lib/h5trav.c
-./tools/lib/h5trav.h
-./tools/lib/h5tools.c
-./tools/lib/h5tools.h
-./tools/lib/h5tools_dump.c
-./tools/lib/h5tools_dump.h
-./tools/lib/h5tools_filters.c
-./tools/lib/h5tools_str.c
-./tools/lib/h5tools_str.h
-./tools/lib/h5tools_utils.c
-./tools/lib/h5tools_utils.h
-./tools/lib/h5tools_ref.c
-./tools/lib/h5tools_ref.h
-./tools/lib/h5tools_type.c
-./tools/lib/ph5diff.h
-./tools/lib/h5tools_error.h
-./tools/lib/io_timer.c
-./tools/lib/io_timer.h
-
-./tools/libtest/Makefile.am
-./tools/libtest/h5tools_test_utils.c
-
-./tools/src/misc/Makefile.am
-./tools/src/misc/h5clear.c
-./tools/src/misc/h5debug.c
-./tools/src/misc/h5delete.c
-./tools/src/misc/h5mkgrp.c
-./tools/src/misc/h5repart.c
-./tools/test/misc/Makefile.am
-./tools/test/misc/h5repart_gentest.c
-./tools/test/misc/repart_test.c
-./tools/test/misc/testh5mkgrp.sh.in
-./tools/test/misc/testh5repart.sh.in
-./tools/test/misc/talign.c
-./tools/test/misc/testfiles/h5clear_equal_after_size.ddl
-./tools/test/misc/testfiles/h5clear_equal_before_size.ddl
-./tools/test/misc/testfiles/h5clear_greater_after_size.ddl
-./tools/test/misc/testfiles/h5clear_greater_before_size.ddl
-./tools/test/misc/testfiles/h5clear_less_after_size.ddl
-./tools/test/misc/testfiles/h5clear_less_before_size.ddl
-./tools/test/misc/testfiles/h5clear_missing_file.ddl
-./tools/test/misc/testfiles/h5clear_missing_file.err
-./tools/test/misc/testfiles/h5clear_noclose_after_size.ddl
-./tools/test/misc/testfiles/h5clear_noclose_before_size.ddl
-./tools/test/misc/testfiles/h5clear_no_mdc_image.err
-./tools/test/misc/testfiles/h5clear_open_fail.err
-./tools/test/misc/testfiles/h5clear_status_noclose_after_size.ddl
-./tools/test/misc/testfiles/h5clear_usage.ddl
-./tools/test/misc/testfiles/h5clear_user_equal_after_size.ddl
-./tools/test/misc/testfiles/h5clear_user_equal_before_size.ddl
-./tools/test/misc/testfiles/h5clear_user_greater_after_size.ddl
-./tools/test/misc/testfiles/h5clear_user_greater_before_size.ddl
-./tools/test/misc/testfiles/h5clear_user_less_after_size.ddl
-./tools/test/misc/testfiles/h5clear_user_less_before_size.ddl
-./tools/test/misc/testfiles/h5clear_fsm_persist_equal.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_greater.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_less.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_noclose.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_user_equal.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_user_greater.h5
-./tools/test/misc/testfiles/h5clear_fsm_persist_user_less.h5
-./tools/test/misc/testfiles/h5clear_log_v3.h5
-./tools/test/misc/testfiles/h5clear_mdc_image.h5
-./tools/test/misc/testfiles/h5clear_sec2_v0.h5
-./tools/test/misc/testfiles/h5clear_sec2_v2.h5
-./tools/test/misc/testfiles/h5clear_sec2_v3.h5
-./tools/test/misc/testfiles/h5clear_status_noclose.h5
-./tools/test/misc/testfiles/latest_h5clear_log_v3.h5
-./tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5
-./tools/test/misc/testfiles/mod_h5clear_mdc_image.h5
-./tools/test/misc/testfiles/h5mkgrp_help.txt
-./tools/test/misc/testfiles/h5mkgrp_version.txt.in
-./tools/test/misc/h5perf_gentest.c
-./tools/test/misc/vds/Makefile.am
-./tools/test/misc/vds/UC_1.h
-./tools/test/misc/vds/UC_1_one_dim_gen.c
-./tools/test/misc/vds/UC_2.h
-./tools/test/misc/vds/UC_2_two_dims_gen.c
-./tools/test/misc/vds/UC_3.h
-./tools/test/misc/vds/UC_3_gaps_gen.c
-./tools/test/misc/vds/UC_4.h
-./tools/test/misc/vds/UC_4_printf_gen.c
-./tools/test/misc/vds/UC_5.h
-./tools/test/misc/vds/UC_5_stride_gen.c
-./tools/test/misc/vds/UC_common.h
-./tools/test/misc/h5clear_gentest.c
-./tools/test/misc/clear_open_chk.c
-./tools/test/misc/testh5clear.sh.in
-
-# h5stat sources
-./tools/src/h5stat/Makefile.am
-./tools/src/h5stat/h5stat.c
-
-# h5stat test files
-./tools/test/h5stat/Makefile.am
-./tools/test/h5stat/h5stat_gentest.c
-./tools/test/h5stat/testh5stat.sh.in
-./tools/test/h5stat/testfiles/h5stat_dims1.ddl
-./tools/test/h5stat/testfiles/h5stat_dims2.ddl
-./tools/test/h5stat/testfiles/h5stat_err_old_fill.h5
-./tools/test/h5stat/testfiles/h5stat_err_old_fill.ddl
-./tools/test/h5stat/testfiles/h5stat_err_old_layout.h5
-./tools/test/h5stat/testfiles/h5stat_err_old_layout.ddl
-./tools/test/h5stat/testfiles/h5stat_err_refcount.h5
-./tools/test/h5stat/testfiles/h5stat_err_refcount.ddl
-./tools/test/h5stat/testfiles/h5stat_filters.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-d.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-dT.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-F.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-file.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-g.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-UD.ddl
-./tools/test/h5stat/testfiles/h5stat_filters-UT.ddl
-./tools/test/h5stat/testfiles/h5stat_filters.h5
-./tools/test/h5stat/testfiles/h5stat_help1.ddl
-./tools/test/h5stat/testfiles/h5stat_help2.ddl
-./tools/test/h5stat/testfiles/h5stat_idx.h5
-./tools/test/h5stat/testfiles/h5stat_idx.ddl
-./tools/test/h5stat/testfiles/h5stat_links1.ddl
-./tools/test/h5stat/testfiles/h5stat_links2.ddl
-./tools/test/h5stat/testfiles/h5stat_links3.ddl
-./tools/test/h5stat/testfiles/h5stat_links4.ddl
-./tools/test/h5stat/testfiles/h5stat_links5.ddl
-./tools/test/h5stat/testfiles/h5stat_newgrat.ddl
-./tools/test/h5stat/testfiles/h5stat_newgrat-UA.ddl
-./tools/test/h5stat/testfiles/h5stat_newgrat-UG.ddl
-./tools/test/h5stat/testfiles/h5stat_newgrat.h5
-./tools/test/h5stat/testfiles/h5stat_nofile.ddl
-./tools/test/h5stat/testfiles/h5stat_notexist.ddl
-./tools/test/h5stat/testfiles/h5stat_numattrs1.ddl
-./tools/test/h5stat/testfiles/h5stat_numattrs2.ddl
-./tools/test/h5stat/testfiles/h5stat_numattrs3.ddl
-./tools/test/h5stat/testfiles/h5stat_numattrs4.ddl
-./tools/test/h5stat/testfiles/h5stat_threshold.h5
-./tools/test/h5stat/testfiles/h5stat_tsohm.ddl
-./tools/test/h5stat/testfiles/h5stat_tsohm.h5
-./tools/test/h5stat/testfiles/h5stat_err_refcount.err
-./tools/test/h5stat/testfiles/h5stat_err_old_layout.err
-./tools/test/h5stat/testfiles/h5stat_err_old_fill.err
-./tools/test/h5stat/testfiles/h5stat_err1_dims.err
-./tools/test/h5stat/testfiles/h5stat_err1_links.err
-./tools/test/h5stat/testfiles/h5stat_err1_numattrs.err
-./tools/test/h5stat/testfiles/h5stat_err2_numattrs.err
-./tools/test/h5stat/testfiles/h5stat_notexist.err
-./tools/test/h5stat/testfiles/h5stat_nofile.err
-
-# h5dump test files
-./tools/testfiles/charsets.h5
-./tools/testfiles/charsets.ddl
-./tools/testfiles/err_attr_dspace.h5
-./tools/testfiles/err_attr_dspace.ddl
-./tools/testfiles/family_file00000.h5
-./tools/testfiles/family_file00001.h5
-./tools/testfiles/family_file00002.h5
-./tools/testfiles/family_file00003.h5
-./tools/testfiles/family_file00004.h5
-./tools/testfiles/family_file00005.h5
-./tools/testfiles/family_file00006.h5
-./tools/testfiles/family_file00007.h5
-./tools/testfiles/family_file00008.h5
-./tools/testfiles/family_file00009.h5
-./tools/testfiles/family_file00010.h5
-./tools/testfiles/family_file00011.h5
-./tools/testfiles/family_file00012.h5
-./tools/testfiles/family_file00013.h5
-./tools/testfiles/family_file00014.h5
-./tools/testfiles/family_file00015.h5
-./tools/testfiles/family_file00016.h5
-./tools/testfiles/family_file00017.h5
-./tools/testfiles/file_space.h5
-./tools/testfiles/file_space.ddl
-./tools/testfiles/filter_fail.h5
-./tools/testfiles/filter_fail.ddl
-./tools/testfiles/h5dump-help.txt
-./tools/testfiles/non_existing.ddl
-./tools/testfiles/packedbits.ddl
-./tools/testfiles/t128bit_float.h5
-./tools/testfiles/taindices.h5
-./tools/testfiles/tall-1.ddl
-./tools/testfiles/tall-2.ddl
-./tools/testfiles/tall-2A.ddl
-./tools/testfiles/tall-2A0.ddl
-./tools/testfiles/tall-2B.ddl
-./tools/testfiles/tall-3.ddl
-./tools/testfiles/tall-4s.ddl
-./tools/testfiles/tall-5s.ddl
-./tools/testfiles/tall-6.ddl
-./tools/testfiles/tall-6.exp
-./tools/testfiles/tall-7.ddl
-./tools/testfiles/tall-7N.ddl
-./tools/testfiles/tall.h5
-./tools/testfiles/tallfilters.ddl
-./tools/testfiles/tarray1.ddl
-./tools/testfiles/tarray1.h5
-./tools/testfiles/tarray1_big.ddl
-./tools/testfiles/tarray1_big.h5
-./tools/testfiles/tarray2.ddl
-./tools/testfiles/tarray2.h5
-./tools/testfiles/tarray3.ddl
-./tools/testfiles/tarray3.h5
-./tools/testfiles/tarray4.ddl
-./tools/testfiles/tarray4.h5
-./tools/testfiles/tarray5.ddl
-./tools/testfiles/tarray5.h5
-./tools/testfiles/tarray6.ddl
-./tools/testfiles/tarray6.h5
-./tools/testfiles/tarray7.ddl
-./tools/testfiles/tarray7.h5
-./tools/testfiles/tarray8.ddl
-./tools/testfiles/tarray8.h5
-./tools/testfiles/tattr.h5
-./tools/testfiles/tattr-1.ddl
-./tools/testfiles/tattr-2.ddl
-./tools/testfiles/tattr-3.ddl
-./tools/testfiles/tattr-4_be.ddl
-./tools/testfiles/tattr2.h5
-./tools/testfiles/tattr4_be.h5
-./tools/testfiles/tattrcontents1.ddl
-./tools/testfiles/tattrcontents2.ddl
-./tools/testfiles/tattrintsize.ddl
-./tools/testfiles/tattrintsize.h5
-./tools/testfiles/tattrreg.h5
-./tools/testfiles/tattrreg.ddl
-./tools/testfiles/tattrregR.ddl
-./tools/testfiles/tbigdims.ddl
-./tools/testfiles/tbigdims.h5
-./tools/testfiles/tbinary.h5
-./tools/testfiles/tbin1.ddl
-./tools/testfiles/tbin2.ddl
-./tools/testfiles/tbin3.ddl
-./tools/testfiles/tbin4.ddl
-./tools/testfiles/tbinregR.exp
-./tools/testfiles/tbinregR.ddl
-./tools/testfiles/tbitfields.h5
-./tools/testfiles/tbitnopaque_be.ddl
-./tools/testfiles/tbitnopaque_le.ddl
-./tools/testfiles/tbitnopaque.h5
-./tools/testfiles/tboot1.ddl
-./tools/testfiles/tboot2.ddl
-./tools/testfiles/tboot2A.ddl
-./tools/testfiles/tboot2B.ddl
-./tools/testfiles/tchar.h5
-./tools/testfiles/tchar1.ddl
-./tools/testfiles/tchunked.ddl
-./tools/testfiles/tcompact.ddl
-./tools/testfiles/tcontents.ddl
-./tools/testfiles/tcontiguos.ddl
-./tools/testfiles/tcmpdattrintsize.ddl
-./tools/testfiles/tcmpdintarray.ddl
-./tools/testfiles/tcmpdints.ddl
-./tools/testfiles/tcmpdintsize.ddl
-./tools/testfiles/tcmpdattrintsize.h5
-./tools/testfiles/tcmpdintarray.h5
-./tools/testfiles/tcmpdints.h5
-./tools/testfiles/tcmpdintsize.h5
-./tools/testfiles/tcomp-1.ddl
-./tools/testfiles/tcomp-2.ddl
-./tools/testfiles/tcomp-3.ddl
-./tools/testfiles/tcomp-4.ddl
-./tools/testfiles/tcompound.h5
-./tools/testfiles/tcompound2.h5
-./tools/testfiles/tcompound_complex.h5
-./tools/testfiles/tcompound_complex2.h5
-./tools/testfiles/tcompound_complex2.ddl
-./tools/testfiles/tdatareg.h5
-./tools/testfiles/tdatareg.ddl
-./tools/testfiles/tdataregR.ddl
-./tools/testfiles/tdeflate.ddl
-./tools/testfiles/tdset-1.ddl
-./tools/testfiles/tdset-2.ddl
-./tools/testfiles/tdset-3s.ddl
-./tools/testfiles/tdset.h5
-./tools/testfiles/tdset2.h5
-./tools/testfiles/tdset_idx.ls
-./tools/testfiles/tdset_idx.h5
-./tools/testfiles/tempty.ddl
-./tools/testfiles/tempty.h5
-./tools/testfiles/tenum.h5
-./tools/testfiles/texceedsubblock.ddl
-./tools/testfiles/texceedsubcount.ddl
-./tools/testfiles/texceedsubstart.ddl
-./tools/testfiles/texceedsubstride.ddl
-./tools/testfiles/texternal.ddl
-./tools/testfiles/textlink.h5
-./tools/testfiles/textlink.h5.xml
-./tools/testfiles/textlink.ddl
-./tools/testfiles/textlinkfar.ddl
-./tools/testfiles/textlinkfar.h5
-./tools/testfiles/textlinksrc.ddl
-./tools/testfiles/textlinksrc.h5
-./tools/testfiles/textlinktar.h5
-./tools/testfiles/tfamily.ddl
-./tools/testfiles/tfamily00000.h5
-./tools/testfiles/tfamily00001.h5
-./tools/testfiles/tfamily00002.h5
-./tools/testfiles/tfamily00003.h5
-./tools/testfiles/tfamily00004.h5
-./tools/testfiles/tfamily00005.h5
-./tools/testfiles/tfamily00006.h5
-./tools/testfiles/tfamily00007.h5
-./tools/testfiles/tfamily00008.h5
-./tools/testfiles/tfamily00009.h5
-./tools/testfiles/tfamily00010.h5
-./tools/testfiles/tfcontents1.h5
-./tools/testfiles/tfcontents2.h5
-./tools/testfiles/tfill.ddl
-./tools/testfiles/tfilters.h5
-./tools/testfiles/tfletcher32.ddl
-./tools/testfiles/tfloatsattrs.ddl
-./tools/testfiles/tfloatsattrs.h5
-./tools/testfiles/tfloatsattrs.wddl
-./tools/testfiles/tfvalues.h5
-./tools/testfiles/tgroup-1.ddl
-./tools/testfiles/tgroup-2.ddl
-./tools/testfiles/tgroup.h5
-./tools/testfiles/tgrp_comments.ls
-./tools/testfiles/tgrp_comments.ddl
-./tools/testfiles/tgrp_comments.h5
-./tools/testfiles/tgrpnullspace.h5
-./tools/testfiles/tgrpnullspace.ddl
-./tools/testfiles/tgrpnullspace.ls
-./tools/testfiles/thlink-1.ddl
-./tools/testfiles/thlink-2.ddl
-./tools/testfiles/thlink-3.ddl
-./tools/testfiles/thlink-4.ddl
-./tools/testfiles/thlink-5.ddl
-./tools/testfiles/thlink.h5
-./tools/testfiles/thyperslab.ddl
-./tools/testfiles/thyperslab.h5
-./tools/testfiles/tindicesyes.ddl
-./tools/testfiles/tindicesno.ddl
-./tools/testfiles/tindicessub2.ddl
-./tools/testfiles/tindicessub3.ddl
-./tools/testfiles/tindicessub4.ddl
-./tools/testfiles/tindicessub1.ddl
-./tools/testfiles/tints4dims.ddl
-./tools/testfiles/tints4dimsBlock2.ddl
-./tools/testfiles/tints4dimsBlockEq.ddl
-./tools/testfiles/tints4dimsCount2.ddl
-./tools/testfiles/tints4dimsCountEq.ddl
-./tools/testfiles/tints4dimsStride2.ddl
-./tools/testfiles/tints4dims.h5
-./tools/testfiles/tintsattrs.ddl
-./tools/testfiles/tintsattrs.h5
-./tools/testfiles/tintsnodata.ddl
-./tools/testfiles/tintsnodata.h5
-./tools/testfiles/tlarge_objname.ddl
-./tools/testfiles/tlarge_objname.h5
-./tools/testfiles/tldouble.ddl
-./tools/testfiles/tldouble.h5
-./tools/testfiles/tldouble_scalar.ddl
-./tools/testfiles/tldouble_scalar.h5
-./tools/testfiles/tlonglinks.ddl
-./tools/testfiles/tlonglinks.h5
-./tools/testfiles/tloop-1.ddl
-./tools/testfiles/tloop.h5
-./tools/testfiles/tloop2.h5
-./tools/testfiles/tmany.h5
-./tools/testfiles/tmulti-b.h5
-./tools/testfiles/tmulti.ddl
-./tools/testfiles/tmulti-g.h5
-./tools/testfiles/tmulti-l.h5
-./tools/testfiles/tmulti-o.h5
-./tools/testfiles/tmulti-r.h5
-./tools/testfiles/tmulti-s.h5
-./tools/testfiles/tmultifile.ls
-./tools/testfiles/tmultifile.ddl
-./tools/testfiles/tnbit.ddl
-./tools/testfiles/tnestcomp-1.ddl
-./tools/testfiles/tnestedcomp.h5
-./tools/testfiles/tnestedcmpddt.ddl
-./tools/testfiles/tnestedcmpddt.h5
-./tools/testfiles/tnoattrdata.ddl
-./tools/testfiles/tnoattrddl.ddl
-./tools/testfiles/tnodata.ddl
-./tools/testfiles/tnoddl.ddl
-./tools/testfiles/tnoddlfile.ddl
-./tools/testfiles/tnoddlfile.exp
-./tools/testfiles/tno-subset.h5
-./tools/testfiles/tno-subset.ddl
-./tools/testfiles/tnullspace.h5
-./tools/testfiles/tnullspace.h5.xml
-./tools/testfiles/tnullspace.ddl
-./tools/testfiles/tobjref.h5
-./tools/testfiles/topaque.h5
-./tools/testfiles/tordercontents1.ddl
-./tools/testfiles/tordercontents2.ddl
-./tools/testfiles/torderlinks1.ddl
-./tools/testfiles/torderlinks2.ddl
-./tools/testfiles/tperror.ddl
-./tools/testfiles/tqmarkfile.ddl
-./tools/testfiles/trawdatafile.ddl
-./tools/testfiles/trawdatafile.exp
-./tools/testfiles/trawssetfile.ddl
-./tools/testfiles/trawssetfile.exp
-./tools/testfiles/treadfilter.ddl
-./tools/testfiles/treadintfilter.ddl
-./tools/testfiles/treference.ddl
-./tools/testfiles/tsaf.ddl
-./tools/testfiles/tsaf.h5
-./tools/testfiles/tscalarattrintsize.ddl
-./tools/testfiles/tscalarattrintsize.h5
-./tools/testfiles/tscalarintattrsize.ddl
-./tools/testfiles/tscalarintattrsize.h5
-./tools/testfiles/tscalarintsize.ddl
-./tools/testfiles/tscalarintsize.h5
-./tools/testfiles/tscalarstring.ddl
-./tools/testfiles/tscalarstring.h5
-./tools/testfiles/tscaleoffset.ddl
-./tools/testfiles/tshuffle.ddl
-./tools/testfiles/tslink-1.ddl
-./tools/testfiles/tslink-2.ddl
-./tools/testfiles/tslink-D.ddl
-./tools/testfiles/tslink.h5
-./tools/testfiles/tsoftlinks.h5
-./tools/testfiles/tsplit_file-m.h5
-./tools/testfiles/tsplit_file-r.h5
-./tools/testfiles/tsplit_file.ddl
-./tools/testfiles/tstarfile.ddl
-./tools/testfiles/tstr.h5
-./tools/testfiles/tstr2.h5
-./tools/testfiles/tstr3.h5
-./tools/testfiles/tstr-1.ddl
-./tools/testfiles/tstr-2.ddl
-./tools/testfiles/tstr2bin2.exp
-./tools/testfiles/tstr2bin6.exp
-./tools/testfiles/tstring.ddl
-./tools/testfiles/tstring2.ddl
-./tools/testfiles/tstringe.ddl
-./tools/testfiles/tszip.ddl
-./tools/testfiles/tudfilter.ddl
-./tools/testfiles/tudfilter.h5
-./tools/testfiles/tudfilter.ls
-./tools/testfiles/tudlink.h5
-./tools/testfiles/tudlink.h5.xml
-./tools/testfiles/tudlink-1.ddl
-./tools/testfiles/tudlink-2.ddl
-./tools/testfiles/tuserfilter.ddl
-./tools/testfiles/tvldtypes1.ddl
-./tools/testfiles/tvldtypes1.h5
-./tools/testfiles/tvldtypes2.ddl
-./tools/testfiles/tvldtypes2.h5
-./tools/testfiles/tvldtypes3.ddl
-./tools/testfiles/tvldtypes3.h5
-./tools/testfiles/tvldtypes4.ddl
-./tools/testfiles/tvldtypes4.h5
-./tools/testfiles/tvldtypes5.ddl
-./tools/testfiles/tvldtypes5.h5
-./tools/testfiles/tvlenstr_array.ddl
-./tools/testfiles/tvlenstr_array.h5
-./tools/testfiles/tvlstr.h5
-./tools/testfiles/tvlstr.ddl
-./tools/testfiles/tvms.ddl
-./tools/testfiles/tvms.h5
-./tools/testfiles/twidedisplay.ddl
-./tools/testfiles/twithddl.exp
-./tools/testfiles/twithddlfile.ddl
-./tools/testfiles/twithddlfile.exp
-./tools/testfiles/tCVE_2018_11206_fill_old.h5
-./tools/testfiles/tCVE_2018_11206_fill_new.h5
-
-# h5dump test error files
-./tools/test/h5dump/errfiles/filter_fail.err
-./tools/test/h5dump/errfiles/non_existing.err
-./tools/test/h5dump/errfiles/tall-1.err
-./tools/test/h5dump/errfiles/tall-2A.err
-./tools/test/h5dump/errfiles/tall-2A0.err
-./tools/test/h5dump/errfiles/tall-2B.err
-./tools/test/h5dump/errfiles/tarray1_big.err
-./tools/test/h5dump/errfiles/tattr-3.err
-./tools/test/h5dump/errfiles/tattrregR.err
-./tools/test/h5dump/errfiles/tcomp-3.err
-./tools/test/h5dump/errfiles/tdataregR.err
-./tools/test/h5dump/errfiles/tdset-2.err
-./tools/test/h5dump/errfiles/texceedsubblock.err
-./tools/test/h5dump/errfiles/texceedsubcount.err
-./tools/test/h5dump/errfiles/texceedsubstart.err
-./tools/test/h5dump/errfiles/texceedsubstride.err
-./tools/test/h5dump/errfiles/textlink.err
-./tools/test/h5dump/errfiles/textlinkfar.err
-./tools/test/h5dump/errfiles/textlinksrc.err
-./tools/test/h5dump/errfiles/tgroup-2.err
-./tools/test/h5dump/errfiles/tnofilename-with-packed-bits.err
-./tools/test/h5dump/errfiles/torderlinks1.err
-./tools/test/h5dump/errfiles/torderlinks2.err
-./tools/test/h5dump/errfiles/tpbitsCharLengthExceeded.err
-./tools/test/h5dump/errfiles/tpbitsCharOffsetExceeded.err
-./tools/test/h5dump/errfiles/tpbitsIncomplete.err
-./tools/test/h5dump/errfiles/tpbitsIntLengthExceeded.err
-./tools/test/h5dump/errfiles/tpbitsIntOffsetExceeded.err
-./tools/test/h5dump/errfiles/tpbitsLengthExceeded.err
-./tools/test/h5dump/errfiles/tpbitsLengthPositive.err
-./tools/test/h5dump/errfiles/tpbitsLongLengthExceeded.err
-./tools/test/h5dump/errfiles/tpbitsLongOffsetExceeded.err
-./tools/test/h5dump/errfiles/tpbitsMaxExceeded.err
-./tools/test/h5dump/errfiles/tpbitsOffsetExceeded.err
-./tools/test/h5dump/errfiles/tpbitsOffsetNegative.err
-./tools/test/h5dump/errfiles/tperror.err
-./tools/test/h5dump/errfiles/tqmarkfile.err
-./tools/test/h5dump/errfiles/tslink-D.err
-
-# h5dump packed bits validation
-./tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
-./tools/testfiles/pbits/tpbitsLengthPositive.ddl
-./tools/testfiles/pbits/tpbitsMaxExceeded.ddl
-./tools/testfiles/pbits/tpbitsSigned.ddl
-./tools/testfiles/pbits/tpbitsSigned2.ddl
-./tools/testfiles/pbits/tpbitsSigned4.ddl
-./tools/testfiles/pbits/tpbitsSignedWhole.ddl
-./tools/testfiles/pbits/tpbitsSignedInt.ddl
-./tools/testfiles/pbits/tpbitsSignedInt4.ddl
-./tools/testfiles/pbits/tpbitsSignedInt8.ddl
-./tools/testfiles/pbits/tpbitsSignedIntWhole.ddl
-./tools/testfiles/pbits/tpbitsSignedLong.ddl
-./tools/testfiles/pbits/tpbitsSignedLong8.ddl
-./tools/testfiles/pbits/tpbitsSignedLong16.ddl
-./tools/testfiles/pbits/tpbitsSignedLongWhole.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLong.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLong16.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLong32.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLongWhole.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLongWhole1.ddl
-./tools/testfiles/pbits/tpbitsSignedLongLongWhole63.ddl
-./tools/testfiles/pbits/tpbitsOffsetNegative.ddl
-./tools/testfiles/pbits/tpbitsUnsigned.ddl
-./tools/testfiles/pbits/tpbitsUnsigned2.ddl
-./tools/testfiles/pbits/tpbitsUnsigned4.ddl
-./tools/testfiles/pbits/tpbitsUnsignedWhole.ddl
-./tools/testfiles/pbits/tpbitsUnsignedInt.ddl
-./tools/testfiles/pbits/tpbitsUnsignedInt4.ddl
-./tools/testfiles/pbits/tpbitsUnsignedInt8.ddl
-./tools/testfiles/pbits/tpbitsUnsignedIntWhole.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLong.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLong8.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLong16.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongWhole.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLong.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLong16.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLong32.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLongWhole.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLongWhole1.ddl
-./tools/testfiles/pbits/tpbitsUnsignedLongLongWhole63.ddl
-./tools/testfiles/pbits/tpbitsMax.ddl
-./tools/testfiles/pbits/tpbitsArray.ddl
-./tools/testfiles/pbits/tpbitsCompound.ddl
-./tools/testfiles/pbits/tpbitsIncomplete.ddl
-./tools/testfiles/pbits/tpbitsLengthExceeded.ddl
-./tools/testfiles/pbits/tpbitsCharLengthExceeded.ddl
-./tools/testfiles/pbits/tpbitsIntLengthExceeded.ddl
-./tools/testfiles/pbits/tpbitsLongLengthExceeded.ddl
-./tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
-./tools/testfiles/pbits/tpbitsCharOffsetExceeded.ddl
-./tools/testfiles/pbits/tpbitsIntOffsetExceeded.ddl
-./tools/testfiles/pbits/tpbitsLongOffsetExceeded.ddl
-./tools/testfiles/pbits/tpbitsOverlapped.ddl
-./tools/testfiles/packedbits.h5
-
-# h5dump vds validation
-./tools/testfiles/vds/tvds-1.ddl
-./tools/testfiles/vds/tvds-2.ddl
-./tools/testfiles/vds/tvds-3_1.ddl
-./tools/testfiles/vds/tvds-3_2.ddl
-./tools/testfiles/vds/tvds-4.ddl
-./tools/testfiles/vds/tvds-5.ddl
-./tools/testfiles/vds/tvds_layout-1.ddl
-./tools/testfiles/vds/tvds_layout-2.ddl
-./tools/testfiles/vds/tvds_layout-3_1.ddl
-./tools/testfiles/vds/tvds_layout-3_2.ddl
-./tools/testfiles/vds/tvds_layout-4.ddl
-./tools/testfiles/vds/tvds_layout-5.ddl
-./tools/testfiles/vds/vds-first.ddl
-./tools/testfiles/vds/vds-gap1.ddl
-./tools/testfiles/vds/vds-gap2.ddl
-./tools/testfiles/vds/vds_layout-eiger.ddl
-./tools/testfiles/vds/vds_layout-maxmin.ddl
-./tools/testfiles/vds/1_a.h5
-./tools/testfiles/vds/1_b.h5
-./tools/testfiles/vds/1_c.h5
-./tools/testfiles/vds/1_d.h5
-./tools/testfiles/vds/1_e.h5
-./tools/testfiles/vds/1_f.h5
-./tools/testfiles/vds/1_vds.h5
-./tools/testfiles/vds/2_a.h5
-./tools/testfiles/vds/2_b.h5
-./tools/testfiles/vds/2_c.h5
-./tools/testfiles/vds/2_d.h5
-./tools/testfiles/vds/2_e.h5
-./tools/testfiles/vds/2_vds.h5
-./tools/testfiles/vds/3_1_vds.h5
-./tools/testfiles/vds/3_2_vds.h5
-./tools/testfiles/vds/4_0.h5
-./tools/testfiles/vds/4_1.h5
-./tools/testfiles/vds/4_2.h5
-./tools/testfiles/vds/4_vds.h5
-./tools/testfiles/vds/5_a.h5
-./tools/testfiles/vds/5_b.h5
-./tools/testfiles/vds/5_c.h5
-./tools/testfiles/vds/5_vds.h5
-./tools/testfiles/vds/a.h5
-./tools/testfiles/vds/b.h5
-./tools/testfiles/vds/c.h5
-./tools/testfiles/vds/d.h5
-./tools/testfiles/vds/vds-percival-unlim-maxmin.h5
-./tools/testfiles/vds/f-0.h5
-./tools/testfiles/vds/f-3.h5
-./tools/testfiles/vds/vds-eiger.h5
-
-# h5dump h5import validation
-./tools/testfiles/out3.h5import
-./tools/testfiles/tordergr.h5
-./tools/testfiles/tordergr3.ddl
-./tools/testfiles/tordergr4.ddl
-./tools/testfiles/tordergr1.ddl
-./tools/testfiles/tordergr5.ddl
-./tools/testfiles/tordergr2.ddl
-./tools/testfiles/torderattr1.ddl
-./tools/testfiles/torderattr2.ddl
-./tools/testfiles/torderattr3.ddl
-./tools/testfiles/torderattr4.ddl
-./tools/testfiles/torderattr.h5
-./tools/testfiles/tfpformat.ddl
-./tools/testfiles/tfpformat.h5
-
-# h5dump h5repack validation
-./tools/testfiles/zerodim.ddl
-./tools/testfiles/zerodim.h5
-
-# h5dump new reference validation
-./tools/testfiles/trefer_attrR.ddl
-./tools/testfiles/trefer_compatR.ddl
-./tools/testfiles/trefer_extR.ddl
-./tools/testfiles/trefer_grpR.ddl
-./tools/testfiles/trefer_obj_delR.ddl
-./tools/testfiles/trefer_objR.ddl
-./tools/testfiles/trefer_paramR.ddl
-./tools/testfiles/trefer_reg_1dR.ddl
-./tools/testfiles/trefer_regR.ddl
-# h5dump and h5diff new reference files
-./tools/testfiles/trefer_attr.h5
-./tools/testfiles/trefer_compat.h5
-./tools/testfiles/trefer_ext1.h5
-./tools/testfiles/trefer_ext2.h5
-./tools/testfiles/trefer_grp.h5
-./tools/testfiles/trefer_obj_del.h5
-./tools/testfiles/trefer_obj.h5
-./tools/testfiles/trefer_param.h5
-./tools/testfiles/trefer_reg_1d.h5
-./tools/testfiles/trefer_reg.h5
-
-# Expected output from h5ls tests
-./tools/testfiles/nosuchfile.ls
-./tools/testfiles/help-1.ls
-./tools/testfiles/help-2.ls
-./tools/testfiles/help-3.ls
-./tools/testfiles/tall-1.ls
-./tools/testfiles/tall-2.ls
-./tools/testfiles/tcomp-1.ls
-./tools/testfiles/tdset-1.ls
-./tools/testfiles/tgroup-1.ls
-./tools/testfiles/tgroup-2.ls
-./tools/testfiles/tgroup-3.ls
-./tools/testfiles/tgroup.ls
-./tools/testfiles/tloop-1.ls
-./tools/testfiles/tnestcomp-1.ls
-./tools/testfiles/tnestcomp-2.ls
-./tools/testfiles/tnestcomp-3.ls
-./tools/testfiles/tnestcomp-4.ls
-./tools/testfiles/tsaf.ls
-./tools/testfiles/tstr-1.ls
-./tools/testfiles/tattr2.ls
-./tools/testfiles/tattrreg_le.ls
-./tools/testfiles/tattrreg_be.ls
-./tools/testfiles/tvldtypes1.ls
-./tools/testfiles/tvldtypes2le.ls
-./tools/testfiles/tvldtypes2be.ls
-./tools/testfiles/tdataregle.ls
-./tools/testfiles/tdataregbe.ls
-./tools/testfiles/tarray1.ls
-./tools/testfiles/tempty.ls
-./tools/testfiles/thlink-1.ls
-./tools/testfiles/tslink-1.ls
-./tools/testfiles/textlink-1.ls
-./tools/testfiles/textlinksrc-1.ls
-./tools/testfiles/textlinksrc-2.ls
-./tools/testfiles/textlinksrc-3.ls
-./tools/testfiles/textlinksrc-4.ls
-./tools/testfiles/textlinksrc-5.ls
-./tools/testfiles/textlinksrc-6.ls
-./tools/testfiles/textlinksrc-7.ls
-./tools/testfiles/textlinksrc-1-old.ls
-./tools/testfiles/textlinksrc-2-old.ls
-./tools/testfiles/textlinksrc-3-old.ls
-./tools/testfiles/textlinksrc-6-old.ls
-./tools/testfiles/textlinksrc-7-old.ls
-./tools/testfiles/tsoftlinks-1.ls
-./tools/testfiles/tsoftlinks-2.ls
-./tools/testfiles/tsoftlinks-3.ls
-./tools/testfiles/tsoftlinks-4.ls
-./tools/testfiles/tsoftlinks-5.ls
-./tools/testfiles/textlinksrc-nodangle-1.ls
-./tools/testfiles/textlinksrc-nodangle-2.ls
-./tools/testfiles/tsoftlinks-nodangle-1.ls
-./tools/testfiles/thlinks-nodangle-1.ls
-./tools/testfiles/tudlink-1.ls
-
-# h5ls vds validation
-./tools/testfiles/vds/tvds-1.ls
-./tools/testfiles/vds/tvds-2.ls
-./tools/testfiles/vds/tvds-3_1.ls
-./tools/testfiles/vds/tvds-3_2.ls
-./tools/testfiles/vds/tvds-4.ls
-./tools/testfiles/vds/tvds-5.ls
-
-#additional test input and output for h5dump XML
-./tools/testfiles/tall.h5.xml
-./tools/testfiles/tarray1.h5.xml
-./tools/testfiles/tarray2.h5.xml
-./tools/testfiles/tarray3.h5.xml
-./tools/testfiles/tarray6.h5.xml
-./tools/testfiles/tarray7.h5.xml
-./tools/testfiles/tattr.h5.xml
-./tools/testfiles/tbitfields_be.h5.xml
-./tools/testfiles/tbitfields_le.h5.xml
-./tools/testfiles/tcompound.h5.xml
-./tools/testfiles/tcompound2.h5.xml
-./tools/testfiles/tcompound_complex.h5.xml
-./tools/testfiles/tdatareg.h5.xml
-./tools/testfiles/tdset.h5.xml
-./tools/testfiles/tdset2.h5.xml
-./tools/testfiles/tempty.h5.xml
-./tools/testfiles/tenum.h5.xml
-./tools/testfiles/test35.nc
-./tools/testfiles/test35.nc.xml
-./tools/testfiles/tfpformat.h5.xml
-./tools/testfiles/tgroup.h5.xml
-./tools/testfiles/thlink.h5.xml
-./tools/testfiles/tloop.h5.xml
-./tools/testfiles/tloop2.h5.xml
-./tools/testfiles/tmany.h5.xml
-./tools/testfiles/tname-amp.h5
-./tools/testfiles/tname-amp.h5.xml
-./tools/testfiles/tname-apos.h5
-./tools/testfiles/tname-apos.h5.xml
-./tools/testfiles/tname-gt.h5
-./tools/testfiles/tname-gt.h5.xml
-./tools/testfiles/tname-lt.h5
-./tools/testfiles/tname-lt.h5.xml
-./tools/testfiles/tname-quot.h5
-./tools/testfiles/tname-quot.h5.xml
-./tools/testfiles/tname-sp.h5
-./tools/testfiles/tname-sp.h5.xml
-./tools/testfiles/tnamed_dtype_attr.ddl
-./tools/testfiles/tnamed_dtype_attr.h5
-./tools/testfiles/tnamed_dtype_attr.h5.xml
-./tools/testfiles/tnestedcomp.h5.xml
-./tools/testfiles/tnodata.h5
-./tools/testfiles/tnodata.h5.xml
-./tools/testfiles/tobjref.h5.xml
-./tools/testfiles/topaque.h5.xml
-./tools/testfiles/tref-escapes-at.h5
-./tools/testfiles/tref-escapes-at.h5.xml
-./tools/testfiles/tref-escapes.h5
-./tools/testfiles/tref-escapes.h5.xml
-./tools/testfiles/tref.h5
-./tools/testfiles/tref.h5.xml
-./tools/testfiles/tsaf.h5.xml
-./tools/testfiles/tslink.h5.xml
-./tools/testfiles/tstr.h5.xml
-./tools/testfiles/tstr2.h5.xml
-./tools/testfiles/tstring-at.h5
-./tools/testfiles/tstring-at.h5.xml
-./tools/testfiles/tstring.h5
-./tools/testfiles/tstring.h5.xml
-./tools/testfiles/tvldtypes1.h5.xml
-./tools/testfiles/tvldtypes2.h5.xml
-./tools/testfiles/tvldtypes3.h5.xml
-./tools/testfiles/tvldtypes4.h5.xml
-./tools/testfiles/tvldtypes5.h5.xml
-./tools/testfiles/tvlstr.h5.xml
-./tools/testfiles/tempty-dtd.h5.xml
-./tools/testfiles/tempty-dtd-uri.h5.xml
-./tools/testfiles/tempty-nons.h5.xml
-./tools/testfiles/tempty-nons-uri.h5.xml
-./tools/testfiles/tempty-ns.h5.xml
-./tools/testfiles/tempty-dtd-2.h5.xml
-./tools/testfiles/tempty-nons-2.h5.xml
-./tools/testfiles/tempty-ns-2.h5.xml
-./tools/testfiles/tall-2A.h5.xml
-./tools/testfiles/torderattr4.h5.xml
-./tools/testfiles/torderattr2.h5.xml
-./tools/testfiles/torderattr3.h5.xml
-./tools/testfiles/torderattr1.h5.xml
-
-
-#test files for h5diff
-./tools/test/h5diff/testfiles/h5diff_10.txt
-./tools/test/h5diff/testfiles/h5diff_11.txt
-./tools/test/h5diff/testfiles/h5diff_12.txt
-./tools/test/h5diff/testfiles/h5diff_13.txt
-./tools/test/h5diff/testfiles/h5diff_14.txt
-./tools/test/h5diff/testfiles/h5diff_15.txt
-./tools/test/h5diff/testfiles/h5diff_16_1.txt
-./tools/test/h5diff/testfiles/h5diff_16_2.txt
-./tools/test/h5diff/testfiles/h5diff_16_3.txt
-./tools/test/h5diff/testfiles/h5diff_17.txt
-./tools/test/h5diff/testfiles/h5diff_171.txt
-./tools/test/h5diff/testfiles/h5diff_172.txt
-./tools/test/h5diff/testfiles/h5diff_18.txt
-./tools/test/h5diff/testfiles/h5diff_18_1.txt
-./tools/test/h5diff/testfiles/h5diff_19.txt
-./tools/test/h5diff/testfiles/h5diff_20.txt
-./tools/test/h5diff/testfiles/h5diff_21.txt
-./tools/test/h5diff/testfiles/h5diff_22.txt
-./tools/test/h5diff/testfiles/h5diff_23.txt
-./tools/test/h5diff/testfiles/h5diff_24.txt
-./tools/test/h5diff/testfiles/h5diff_25.txt
-./tools/test/h5diff/testfiles/h5diff_26.txt
-./tools/test/h5diff/testfiles/h5diff_27.txt
-./tools/test/h5diff/testfiles/h5diff_28.txt
-./tools/test/h5diff/testfiles/h5diff_30.txt
-./tools/test/h5diff/testfiles/h5diff_50.txt
-./tools/test/h5diff/testfiles/h5diff_51.txt
-./tools/test/h5diff/testfiles/h5diff_52.txt
-./tools/test/h5diff/testfiles/h5diff_53.txt
-./tools/test/h5diff/testfiles/h5diff_54.txt
-./tools/test/h5diff/testfiles/h5diff_55.txt
-./tools/test/h5diff/testfiles/h5diff_56.txt
-./tools/test/h5diff/testfiles/h5diff_57.txt
-./tools/test/h5diff/testfiles/h5diff_58.txt
-./tools/test/h5diff/testfiles/h5diff_58_ref.txt
-./tools/test/h5diff/testfiles/h5diff_59.txt
-./tools/test/h5diff/testfiles/h5diff_60.txt
-./tools/test/h5diff/testfiles/h5diff_61.txt
-./tools/test/h5diff/testfiles/h5diff_62.txt
-./tools/test/h5diff/testfiles/h5diff_63.txt
-./tools/test/h5diff/testfiles/h5diff_600.txt
-./tools/test/h5diff/testfiles/h5diff_601.txt
-./tools/test/h5diff/testfiles/h5diff_601_ERR.err
-./tools/test/h5diff/testfiles/h5diff_603.txt
-./tools/test/h5diff/testfiles/h5diff_604.txt
-./tools/test/h5diff/testfiles/h5diff_605.txt
-./tools/test/h5diff/testfiles/h5diff_606.txt
-./tools/test/h5diff/testfiles/h5diff_607.txt
-./tools/test/h5diff/testfiles/h5diff_608.txt
-./tools/test/h5diff/testfiles/h5diff_609.txt
-./tools/test/h5diff/testfiles/h5diff_610.txt
-./tools/test/h5diff/testfiles/h5diff_612.txt
-./tools/test/h5diff/testfiles/h5diff_613.txt
-./tools/test/h5diff/testfiles/h5diff_614.txt
-./tools/test/h5diff/testfiles/h5diff_615.txt
-./tools/test/h5diff/testfiles/h5diff_616.txt
-./tools/test/h5diff/testfiles/h5diff_617.txt
-./tools/test/h5diff/testfiles/h5diff_618.txt
-./tools/test/h5diff/testfiles/h5diff_619.txt
-./tools/test/h5diff/testfiles/h5diff_621.txt
-./tools/test/h5diff/testfiles/h5diff_622.txt
-./tools/test/h5diff/testfiles/h5diff_623.txt
-./tools/test/h5diff/testfiles/h5diff_624.txt
-./tools/test/h5diff/testfiles/h5diff_625.txt
-./tools/test/h5diff/testfiles/h5diff_626.txt
-./tools/test/h5diff/testfiles/h5diff_627.txt
-./tools/test/h5diff/testfiles/h5diff_628.txt
-./tools/test/h5diff/testfiles/h5diff_629.txt
-./tools/test/h5diff/testfiles/h5diff_630.txt
-./tools/test/h5diff/testfiles/h5diff_631.txt
-./tools/test/h5diff/testfiles/h5diff_640.txt
-./tools/test/h5diff/testfiles/h5diff_641.txt
-./tools/test/h5diff/testfiles/h5diff_642.txt
-./tools/test/h5diff/testfiles/h5diff_643.txt
-./tools/test/h5diff/testfiles/h5diff_644.txt
-./tools/test/h5diff/testfiles/h5diff_645.txt
-./tools/test/h5diff/testfiles/h5diff_646.txt
-./tools/test/h5diff/testfiles/h5diff_70.txt
-./tools/test/h5diff/testfiles/h5diff_700.txt
-./tools/test/h5diff/testfiles/h5diff_701.txt
-./tools/test/h5diff/testfiles/h5diff_702.txt
-./tools/test/h5diff/testfiles/h5diff_703.txt
-./tools/test/h5diff/testfiles/h5diff_704.txt
-./tools/test/h5diff/testfiles/h5diff_705.txt
-./tools/test/h5diff/testfiles/h5diff_706.txt
-./tools/test/h5diff/testfiles/h5diff_707.txt
-./tools/test/h5diff/testfiles/h5diff_708.txt
-./tools/test/h5diff/testfiles/h5diff_709.txt
-./tools/test/h5diff/testfiles/h5diff_710.txt
-./tools/test/h5diff/testfiles/h5diff_80.txt
-./tools/test/h5diff/testfiles/h5diff_800.txt
-./tools/test/h5diff/testfiles/h5diff_801.txt
-./tools/test/h5diff/testfiles/h5diff_830.txt
-./tools/test/h5diff/testfiles/h5diff_90.txt
-./tools/test/h5diff/testfiles/h5diff_100.txt
-./tools/test/h5diff/testfiles/h5diff_101.txt
-./tools/test/h5diff/testfiles/h5diff_102.txt
-./tools/test/h5diff/testfiles/h5diff_103.txt
-./tools/test/h5diff/testfiles/h5diff_104.txt
-# w for Windows-specific
-./tools/test/h5diff/testfiles/h5diff_101w.txt
-./tools/test/h5diff/testfiles/h5diff_102w.txt
-./tools/test/h5diff/testfiles/h5diff_103w.txt
-./tools/test/h5diff/testfiles/h5diff_104w.txt
-./tools/test/h5diff/testfiles/h5diff_200.txt
-./tools/test/h5diff/testfiles/h5diff_201.txt
-./tools/test/h5diff/testfiles/h5diff_202.txt
-./tools/test/h5diff/testfiles/h5diff_203.txt
-./tools/test/h5diff/testfiles/h5diff_204.txt
-./tools/test/h5diff/testfiles/h5diff_205.txt
-./tools/test/h5diff/testfiles/h5diff_206.txt
-./tools/test/h5diff/testfiles/h5diff_207.txt
-./tools/test/h5diff/testfiles/h5diff_208.txt
-./tools/test/h5diff/testfiles/h5diff_220.txt
-./tools/test/h5diff/testfiles/h5diff_221.txt
-./tools/test/h5diff/testfiles/h5diff_222.txt
-./tools/test/h5diff/testfiles/h5diff_223.txt
-./tools/test/h5diff/testfiles/h5diff_224.txt
-./tools/test/h5diff/testfiles/h5diff_300.txt
-./tools/test/h5diff/testfiles/h5diff_400.txt
-./tools/test/h5diff/testfiles/h5diff_401.txt
-./tools/test/h5diff/testfiles/h5diff_402.txt
-./tools/test/h5diff/testfiles/h5diff_403.txt
-./tools/test/h5diff/testfiles/h5diff_404.txt
-./tools/test/h5diff/testfiles/h5diff_405.txt
-./tools/test/h5diff/testfiles/h5diff_406.txt
-./tools/test/h5diff/testfiles/h5diff_407.txt
-./tools/test/h5diff/testfiles/h5diff_408.txt
-./tools/test/h5diff/testfiles/h5diff_409.txt
-./tools/test/h5diff/testfiles/h5diff_410.txt
-./tools/test/h5diff/testfiles/h5diff_411.txt
-./tools/test/h5diff/testfiles/h5diff_412.txt
-./tools/test/h5diff/testfiles/h5diff_413.txt
-./tools/test/h5diff/testfiles/h5diff_414.txt
-./tools/test/h5diff/testfiles/h5diff_415.txt
-./tools/test/h5diff/testfiles/h5diff_416.txt
-./tools/test/h5diff/testfiles/h5diff_417.txt
-./tools/test/h5diff/testfiles/h5diff_418.txt
-./tools/test/h5diff/testfiles/h5diff_419.txt
-./tools/test/h5diff/testfiles/h5diff_420.txt
-./tools/test/h5diff/testfiles/h5diff_421.txt
-./tools/test/h5diff/testfiles/h5diff_422.txt
-./tools/test/h5diff/testfiles/h5diff_423.txt
-./tools/test/h5diff/testfiles/h5diff_424.txt
-./tools/test/h5diff/testfiles/h5diff_425.txt
-./tools/test/h5diff/testfiles/h5diff_450.txt
-./tools/test/h5diff/testfiles/h5diff_451.txt
-./tools/test/h5diff/testfiles/h5diff_452.txt
-./tools/test/h5diff/testfiles/h5diff_453.txt
-./tools/test/h5diff/testfiles/h5diff_454.txt
-./tools/test/h5diff/testfiles/dangling_link.err
-./tools/test/h5diff/testfiles/h5diff_455.txt
-./tools/test/h5diff/testfiles/h5diff_456.txt
-./tools/test/h5diff/testfiles/h5diff_457.txt
-./tools/test/h5diff/testfiles/h5diff_458.txt
-./tools/test/h5diff/testfiles/h5diff_459.txt
-./tools/test/h5diff/testfiles/h5diff_465.txt
-./tools/test/h5diff/testfiles/h5diff_466.txt
-./tools/test/h5diff/testfiles/h5diff_467.txt
-./tools/test/h5diff/testfiles/h5diff_468.txt
-./tools/test/h5diff/testfiles/h5diff_469.txt
-./tools/test/h5diff/testfiles/h5diff_471.txt
-./tools/test/h5diff/testfiles/h5diff_472.txt
-./tools/test/h5diff/testfiles/h5diff_473.txt
-./tools/test/h5diff/testfiles/h5diff_474.txt
-./tools/test/h5diff/testfiles/h5diff_475.txt
-./tools/test/h5diff/testfiles/h5diff_480.txt
-./tools/test/h5diff/testfiles/h5diff_481.txt
-./tools/test/h5diff/testfiles/h5diff_482.txt
-./tools/test/h5diff/testfiles/h5diff_483.txt
-./tools/test/h5diff/testfiles/h5diff_484.txt
-./tools/test/h5diff/testfiles/h5diff_485.txt
-./tools/test/h5diff/testfiles/h5diff_486.txt
-./tools/test/h5diff/testfiles/h5diff_487.txt
-./tools/test/h5diff/testfiles/h5diff_500.txt
-./tools/test/h5diff/testfiles/h5diff_501.txt
-./tools/test/h5diff/testfiles/h5diff_502.txt
-./tools/test/h5diff/testfiles/h5diff_503.txt
-./tools/test/h5diff/testfiles/h5diff_504.txt
-./tools/test/h5diff/testfiles/h5diff_505.txt
-./tools/test/h5diff/testfiles/h5diff_506.txt
-./tools/test/h5diff/testfiles/h5diff_507.txt
-./tools/test/h5diff/testfiles/h5diff_508.txt
-./tools/test/h5diff/testfiles/h5diff_509.txt
-./tools/test/h5diff/testfiles/h5diff_510.txt
-./tools/test/h5diff/testfiles/h5diff_511.txt
-./tools/test/h5diff/testfiles/h5diff_512.txt
-./tools/test/h5diff/testfiles/h5diff_513.txt
-./tools/test/h5diff/testfiles/h5diff_514.txt
-./tools/test/h5diff/testfiles/h5diff_515.txt
-./tools/test/h5diff/testfiles/h5diff_516.txt
-./tools/test/h5diff/testfiles/h5diff_517.txt
-./tools/test/h5diff/testfiles/h5diff_518.txt
-./tools/test/h5diff/testfiles/h5diff_530.txt
-./tools/test/h5diff/testfiles/h5diff_540.txt
-
-./tools/test/h5diff/testfiles/h5diff_attr1.h5
-./tools/test/h5diff/testfiles/h5diff_attr2.h5
-./tools/test/h5diff/testfiles/h5diff_attr3.h5
-./tools/test/h5diff/testfiles/h5diff_attr_v_level1.h5
-./tools/test/h5diff/testfiles/h5diff_attr_v_level2.h5
-./tools/test/h5diff/testfiles/h5diff_basic1.h5
-./tools/test/h5diff/testfiles/h5diff_basic2.h5
-./tools/test/h5diff/testfiles/h5diff_dset1.h5
-./tools/test/h5diff/testfiles/h5diff_dset2.h5
-./tools/test/h5diff/testfiles/h5diff_dset3.h5
-./tools/test/h5diff/testfiles/h5diff_dtypes.h5
-./tools/test/h5diff/testfiles/h5diff_empty.h5
-./tools/test/h5diff/testfiles/h5diff_eps1.h5
-./tools/test/h5diff/testfiles/h5diff_eps2.h5
-./tools/test/h5diff/testfiles/h5diff_hyper1.h5
-./tools/test/h5diff/testfiles/h5diff_hyper2.h5
-./tools/test/h5diff/testfiles/h5diff_types.h5
-./tools/test/h5diff/testfiles/h5diff_links.h5
-./tools/test/h5diff/testfiles/h5diff_ext2softlink_src.h5
-./tools/test/h5diff/testfiles/h5diff_ext2softlink_trg.h5
-./tools/test/h5diff/testfiles/h5diff_extlink_src.h5
-./tools/test/h5diff/testfiles/h5diff_extlink_trg.h5
-./tools/test/h5diff/testfiles/h5diff_linked_softlink.h5
-./tools/test/h5diff/testfiles/h5diff_softlinks.h5
-./tools/test/h5diff/testfiles/h5diff_danglelinks1.h5
-./tools/test/h5diff/testfiles/h5diff_danglelinks2.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse1.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse2.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse_ext1.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse_ext2-1.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse_ext2-2.h5
-./tools/test/h5diff/testfiles/h5diff_grp_recurse_ext2-3.h5
-./tools/test/h5diff/testfiles/h5diff_exclude1-1.h5
-./tools/test/h5diff/testfiles/h5diff_exclude1-2.h5
-./tools/test/h5diff/testfiles/h5diff_exclude2-1.h5
-./tools/test/h5diff/testfiles/h5diff_exclude2-2.h5
-./tools/test/h5diff/testfiles/h5diff_exclude3-1.h5
-./tools/test/h5diff/testfiles/h5diff_exclude3-2.h5
-./tools/test/h5diff/testfiles/h5diff_comp_vl_strs.h5
-./tools/test/h5diff/testfiles/h5diff_dset_zero_dim_size1.h5
-./tools/test/h5diff/testfiles/h5diff_dset_zero_dim_size2.h5
-./tools/test/h5diff/testfiles/h5diff_enum_invalid_values.h5
-./tools/test/h5diff/testfiles/h5diff_eps.txt
-./tools/test/h5diff/testfiles/compounds_array_vlen1.h5
-./tools/test/h5diff/testfiles/compounds_array_vlen2.h5
-./tools/test/h5diff/testfiles/non_comparables1.h5
-./tools/test/h5diff/testfiles/non_comparables2.h5
-./tools/test/h5diff/testfiles/h5diff_8625.txt
-./tools/test/h5diff/testfiles/h5diff_8639.txt
-./tools/test/h5diff/testfiles/tudfilter.h5
-./tools/test/h5diff/testfiles/tudfilter2.h5
-./tools/test/h5diff/testfiles/h5diff_ud.txt
-./tools/test/h5diff/testfiles/h5diff_udfail.err
-./tools/test/h5diff/testfiles/h5diff_udfail.txt
-./tools/test/h5diff/testfiles/h5diff_strings1.h5
-./tools/test/h5diff/testfiles/h5diff_strings2.h5
-./tools/test/h5diff/testfiles/h5diff_vlstr.txt
-#vds
-./tools/test/h5diff/testfiles/h5diff_v1.txt
-./tools/test/h5diff/testfiles/h5diff_v2.txt
-./tools/test/h5diff/testfiles/h5diff_v3.txt
-#reg_ref
-./tools/test/h5diff/testfiles/h5diff_reg.txt
-
-#test files for h5repack
-./tools/test/h5repack/testfiles/README
-./tools/test/h5repack/testfiles/bounds_latest_latest.h5
-./tools/test/h5repack/testfiles/h5copy_extlinks_src.h5
-./tools/test/h5repack/testfiles/h5repack_aggr.h5
-./tools/test/h5repack/testfiles/h5repack_attr.h5
-./tools/test/h5repack/testfiles/h5repack_attr_refs.h5
-./tools/test/h5repack/testfiles/h5repack_deflate.h5
-./tools/test/h5repack/testfiles/h5repack_early.h5
-./tools/test/h5repack/testfiles/h5repack_ext.bin
-./tools/test/h5repack/testfiles/h5repack_ext.h5
-./tools/test/h5repack/testfiles/h5repack_f32le.h5
-./tools/test/h5repack/testfiles/h5repack_f32le_ex-0.dat
-./tools/test/h5repack/testfiles/h5repack_f32le_ex.h5
-./tools/test/h5repack/testfiles/h5repack_fill.h5
-./tools/test/h5repack/testfiles/h5repack_filters.h5
-./tools/test/h5repack/testfiles/h5repack_fletcher.h5
-./tools/test/h5repack/testfiles/h5repack_fsm_aggr_nopersist.h5
-./tools/test/h5repack/testfiles/h5repack_fsm_aggr_persist.h5
-./tools/test/h5repack/testfiles/h5repack_hlink.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_1d.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_1d_ex-0.dat
-./tools/test/h5repack/testfiles/h5repack_int32le_1d_ex-1.dat
-./tools/test/h5repack/testfiles/h5repack_int32le_1d_ex.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_2d.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_2d_ex-0.dat
-./tools/test/h5repack/testfiles/h5repack_int32le_2d_ex.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_3d.h5
-./tools/test/h5repack/testfiles/h5repack_int32le_3d_ex-0.dat
-./tools/test/h5repack/testfiles/h5repack_int32le_3d_ex.h5
-./tools/test/h5repack/testfiles/h5repack.info
-./tools/test/h5repack/testfiles/h5repack_layout.h5
-./tools/test/h5repack/testfiles/h5repack_layouto.h5
-./tools/test/h5repack/testfiles/h5repack_layout2.h5
-./tools/test/h5repack/testfiles/h5repack_layout3.h5
-./tools/test/h5repack/testfiles/h5repack_layout.UD.h5
-./tools/test/h5repack/testfiles/h5repack_named_dtypes.h5
-./tools/test/h5repack/testfiles/h5repack_nested_8bit_enum_deflated.h5
-./tools/test/h5repack/testfiles/h5repack_nested_8bit_enum.h5
-./tools/test/h5repack/testfiles/h5repack_nbit.h5
-./tools/test/h5repack/testfiles/h5repack_none.h5
-./tools/test/h5repack/testfiles/h5repack_objs.h5
-./tools/test/h5repack/testfiles/h5repack_paged_nopersist.h5
-./tools/test/h5repack/testfiles/h5repack_paged_persist.h5
-./tools/test/h5repack/testfiles/h5repack_refs.h5
-./tools/test/h5repack/testfiles/h5repack_shuffle.h5
-./tools/test/h5repack/testfiles/h5repack_soffset.h5
-./tools/test/h5repack/testfiles/h5repack_szip.h5
-./tools/test/h5repack/testfiles/h5repack_uint8be.h5
-./tools/test/h5repack/testfiles/h5repack_uint8be_ex-0.dat
-./tools/test/h5repack/testfiles/h5repack_uint8be_ex-1.dat
-./tools/test/h5repack/testfiles/h5repack_uint8be_ex-2.dat
-./tools/test/h5repack/testfiles/h5repack_uint8be_ex-3.dat
-./tools/test/h5repack/testfiles/h5repack_uint8be_ex.h5
-./tools/test/h5repack/testfiles/ublock.bin
-./tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl
-./tools/test/h5repack/testfiles/deflate_limit.h5repack_layout.h5.ddl
-./tools/test/h5repack/testfiles/plugin_none.h5repack_layout.UD.h5.tst
-./tools/test/h5repack/testfiles/plugin_test.h5repack_layout.h5.tst
-./tools/test/h5repack/testfiles/plugin_zero.h5repack_layout.h5.tst
-./tools/test/h5repack/testfiles/plugin_version_test.h5repack_layout.h5.tst
-./tools/test/h5repack/testfiles/h5repack-help.txt
-./tools/test/h5repack/testfiles/h5repack_filters.h5-gzip_verbose_filters.tst
-./tools/test/h5repack/testfiles/h5repack_layout.h5-dset2_chunk_20x10-errstk.tst
-./tools/test/h5repack/testfiles/h5repack_layout.h5.ddl
-./tools/test/h5repack/testfiles/h5repack_layout.UD.h5-plugin_none.ddl
-./tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_test.ddl
-./tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
-./tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_zero.ddl
-./tools/test/h5repack/testfiles/h5repack_CVE-2018-17432.h5
-./tools/test/h5repack/testfiles/h5repack_CVE-2018-14460.h5
-./tools/test/h5repack/testfiles/GS.h5repack_paged_nopersist.h5.ddl
-./tools/test/h5repack/testfiles/S.h5repack_fsm_aggr_persist.h5.ddl
-./tools/test/h5repack/testfiles/SP.h5repack_fsm_aggr_nopersist.h5.ddl
-./tools/test/h5repack/testfiles/SP.h5repack_paged_persist.h5.ddl
-./tools/test/h5repack/testfiles/SPT.h5repack_aggr.h5.ddl
-./tools/test/h5repack/testfiles/STG.h5repack_none.h5.ddl
-./tools/test/h5repack/testfiles/1_vds.h5-vds_dset_chunk20x10x5-v.ddl
-./tools/test/h5repack/testfiles/2_vds.h5-vds_chunk3x6x9-v.ddl
-./tools/test/h5repack/testfiles/3_1_vds.h5-vds_chunk2x5x8-v.ddl
-./tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl
-./tools/test/h5repack/testfiles/4_vds.h5-vds_compa-v.ddl
-./tools/test/h5repack/testfiles/attrregion.tattrreg.h5.ddl
-./tools/test/h5repack/testfiles/dataregion.tdatareg.h5.ddl
-./tools/test/h5repack/testfiles/textlink-base.textlink.h5.ddl
-./tools/test/h5repack/testfiles/textlink-merge.textlink.h5.tst
-./tools/test/h5repack/testfiles/textlink-mergeprune.textlink.h5.ddl
-./tools/test/h5repack/testfiles/textlink-prune.textlink.h5.ddl
-./tools/test/h5repack/testfiles/textlinkfar-base.textlinkfar.h5.ddl
-./tools/test/h5repack/testfiles/textlinkfar-merge.textlinkfar.h5.tst
-./tools/test/h5repack/testfiles/textlinkfar-mergeprune.textlinkfar.h5.ddl
-./tools/test/h5repack/testfiles/textlinkfar-prune.textlinkfar.h5.ddl
-./tools/test/h5repack/testfiles/textlinksrc-base.textlinksrc.h5.ddl
-./tools/test/h5repack/testfiles/textlinksrc-merge.textlinksrc.h5.tst
-./tools/test/h5repack/testfiles/textlinksrc-mergeprune.textlinksrc.h5.ddl
-./tools/test/h5repack/testfiles/textlinksrc-prune.textlinksrc.h5.ddl
-./tools/test/h5repack/testfiles/textlinktar-base.textlinktar.h5.ddl
-./tools/test/h5repack/testfiles/textlinktar-merge.textlinktar.h5.tst
-./tools/test/h5repack/testfiles/textlinktar-mergeprune.textlinktar.h5.ddl
-./tools/test/h5repack/testfiles/textlinktar-prune.textlinktar.h5.ddl
-./tools/test/h5repack/testfiles/tsoftlinks-base.tsoftlinks.h5.ddl
-./tools/test/h5repack/testfiles/tsoftlinks-merge.tsoftlinks.h5.tst
-./tools/test/h5repack/testfiles/tsoftlinks-mergeprune.tsoftlinks.h5.ddl
-./tools/test/h5repack/testfiles/tsoftlinks-prune.tsoftlinks.h5.ddl
-./tools/test/h5repack/testfiles/h5copy_extlinks_src-base.h5copy_extlinks_src.h5.ddl
-./tools/test/h5repack/testfiles/h5copy_extlinks_src-merge.h5copy_extlinks_src.h5.tst
-./tools/test/h5repack/testfiles/h5copy_extlinks_src-mergeprune.h5copy_extlinks_src.h5.ddl
-./tools/test/h5repack/testfiles/h5copy_extlinks_src-prune.h5copy_extlinks_src.h5.ddl
-./tools/test/h5repack/testfiles/h5copy_extlinks_trg.h5
-
-# jam utility and tests
-./tools/src/h5jam/Makefile.am
-./tools/src/h5jam/h5jam.c
-./tools/src/h5jam/h5unjam.c
-./tools/test/h5jam/Makefile.am
-./tools/test/h5jam/h5jamgentest.c
-./tools/test/h5jam/testh5jam.sh.in
-./tools/test/h5jam/tellub.c
-./tools/test/h5jam/getub.c
-
-# test files for jam
-./tools/test/h5jam/testfiles/h5jam-help.txt
-./tools/test/h5jam/testfiles/h5unjam-help.txt
-./tools/test/h5jam/testfiles/tall.h5
-./tools/test/h5jam/testfiles/twithub.h5
-./tools/test/h5jam/testfiles/twithub513.h5
-./tools/test/h5jam/testfiles/u10.txt
-./tools/test/h5jam/testfiles/u511.txt
-./tools/test/h5jam/testfiles/u512.txt
-./tools/test/h5jam/testfiles/u513.txt
-./tools/test/h5jam/testfiles/h5jam-ub-nohdf5.txt
-
-# test files for h5copy
-./tools/test/h5copy/testfiles/h5copytst.h5
-./tools/test/h5copy/testfiles/h5copytst.out.ls
-./tools/test/h5copy/testfiles/h5copy_ref.h5
-./tools/test/h5copy/testfiles/h5copy_ref.out.ls
-./tools/test/h5copy/testfiles/h5copy_extlinks_src.h5
-./tools/test/h5copy/testfiles/h5copy_extlinks_trg.h5
-./tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls
-./tools/test/h5copy/testfiles/h5copy_misc1.err
-./tools/test/h5copy/testfiles/h5copy_misc1.out
-./tools/test/h5copy/testfiles/h5copytst_new.h5
-./tools/test/h5copy/testfiles/h5copytst_new.out.ls
-./tools/test/h5copy/testfiles/h5copy_plugin_fail_ERR.out.h5.txt
-./tools/test/h5copy/testfiles/h5copy_plugin_test.out.h5.txt
-./tools/test/h5copy/testfiles/tudfilter.h5
-./tools/test/h5copy/testfiles/tudfilter2.h5
-./tools/test/h5copy/testfiles/tudfilter.h5.txt
-./tools/test/h5copy/testfiles/tudfilter.h5_ERR.txt
-
-# test files for h5mkgrp
-./tools/testfiles/h5mkgrp_nested_p.ls
-./tools/testfiles/h5mkgrp_nested_lp.ls
-./tools/testfiles/h5mkgrp_nested_mult_p.ls
-./tools/testfiles/h5mkgrp_nested_mult_lp.ls
-./tools/testfiles/h5mkgrp_several.ls
-./tools/testfiles/h5mkgrp_several_v.ls
-./tools/testfiles/h5mkgrp_several_p.ls
-./tools/testfiles/h5mkgrp_several_l.ls
-./tools/testfiles/h5mkgrp_single.ls
-./tools/testfiles/h5mkgrp_single_v.ls
-./tools/testfiles/h5mkgrp_single_p.ls
-./tools/testfiles/h5mkgrp_single_l.ls
-
-./tools/src/h5perf/Makefile.am
-./tools/src/h5perf/perf.c
-./tools/src/h5perf/pio_engine.c
-./tools/src/h5perf/pio_perf.c
-./tools/src/h5perf/pio_perf.h
-./tools/src/h5perf/sio_engine.c
-./tools/src/h5perf/sio_perf.c
-./tools/src/h5perf/sio_perf.h
-
-./tools/test/perform/Makefile.am
-./tools/test/perform/build_h5perf_alone.sh
-./tools/test/perform/build_h5perf_serial_alone.sh
-./tools/test/perform/chunk.c
-./tools/test/perform/chunk_cache.c
-./tools/test/perform/direct_write_perf.c
-./tools/test/perform/gen_report.pl
-./tools/test/perform/iopipe.c
-./tools/test/perform/overhead.c
-./tools/test/perform/perf_meta.c
-./tools/test/perform/pio_standalone.c
-./tools/test/perform/pio_standalone.h
-./tools/test/perform/sio_standalone.c
-./tools/test/perform/sio_standalone.h
-./tools/test/perform/zip_perf.c
-
-# Utils directory
-./utils/Makefile.am
-
-# Mirror VFD utilities
-./utils/mirror_vfd/Makefile.am
-./utils/mirror_vfd/mirror_remote.c
-./utils/mirror_vfd/mirror_remote.h
-./utils/mirror_vfd/mirror_server.c
-./utils/mirror_vfd/mirror_server_stop.c
-./utils/mirror_vfd/mirror_writer.c
-
-# test utilities
-./utils/test/Makefile.am
-./utils/test/swmr_check_compat_vfd.c
-
-# parallel tools (h5dwalk) and tests
-./utils/tools/CMakeLists.txt
-./utils/tools/Makefile.am
-./utils/tools/h5dwalk/CMakeLists.txt
-./utils/tools/h5dwalk/Makefile.am
-./utils/tools/h5dwalk/h5dwalk.1
-./utils/tools/h5dwalk/h5dwalk.c
-./utils/tools/test/CMakeLists.txt
-./utils/tools/test/Makefile.am
-./utils/tools/test/h5dwalk/CMakeLists.txt
-./utils/tools/test/h5dwalk/CMakeTests.cmake
-./utils/tools/test/h5dwalk/Makefile.am
-./utils/tools/test/h5dwalk/copy_demo_files.sh.in
-./utils/tools/test/h5dwalk/help.h5dwalk
-./utils/tools/test/h5dwalk/testh5dwalk.sh.in
-
-# high level libraries
-./hl/Makefile.am
-./hl/examples/Makefile.am
-./hl/examples/ex_ds1.c
-./hl/examples/ex_lite1.c
-./hl/examples/ex_lite2.c
-./hl/examples/ex_lite3.c
-./hl/examples/ex_image1.c
-./hl/examples/ex_image2.c
-./hl/examples/ex_table_01.c
-./hl/examples/ex_table_02.c
-./hl/examples/ex_table_03.c
-./hl/examples/ex_table_04.c
-./hl/examples/ex_table_05.c
-./hl/examples/ex_table_06.c
-./hl/examples/ex_table_07.c
-./hl/examples/ex_table_08.c
-./hl/examples/ex_table_09.c
-./hl/examples/ex_table_10.c
-./hl/examples/ex_table_11.c
-./hl/examples/ex_table_12.c
-./hl/examples/image24pixel.txt
-./hl/examples/image8.txt
-./hl/examples/pal_rgb.h
-./hl/examples/ptExampleFL.c
-./hl/examples/run-hl-ex.sh
-./hl/examples/run-hlc-ex.sh.in
-./hl/src/Makefile.am
-./hl/src/H5DO.c
-./hl/src/H5DOpublic.h
-./hl/src/H5DS.c
-./hl/src/H5DSprivate.h
-./hl/src/H5DSpublic.h
-./hl/src/H5HLprivate2.h
-./hl/src/H5IM.c
-./hl/src/H5IMprivate.h
-./hl/src/H5IMpublic.h
-./hl/src/H5LD.c
-./hl/src/H5LDprivate.h
-./hl/src/H5LDpublic.h
-./hl/src/H5LT.c
-./hl/src/H5LTanalyze.c
-./hl/src/H5LTanalyze.l
-./hl/src/H5LTparse.c
-./hl/src/H5LTparse.h
-./hl/src/H5LTparse.y
-./hl/src/H5LTprivate.h
-./hl/src/H5LTpublic.h
-./hl/src/H5PT.c
-./hl/src/H5PTprivate.h
-./hl/src/H5PTpublic.h
-./hl/src/H5TB.c
-./hl/src/H5TBprivate.h
-./hl/src/H5TBpublic.h
-./hl/src/hdf5_hl.h
-./hl/test/H5srcdir_str.h.in
-./hl/test/Makefile.am
-./hl/test/dsdata.txt
-./hl/test/dslat.txt
-./hl/test/dslon.txt
-./hl/test/dtype_file.txt
-./hl/test/dtype_file_readable.txt
-./hl/test/earth.pal
-./hl/test/gen_test_ds.c
-./hl/test/gen_test_ld.c
-./hl/test/h5hltest.h
-./hl/test/image24pixel.txt
-./hl/test/image24plane.txt
-./hl/test/image8.txt
-./hl/test/pal_rgb.h
-./hl/test/sepia.pal
-./hl/test/test_ds.c
-./hl/test/test_ds_be.h5
-./hl/test/test_ds_be_new_ref.h5
-./hl/test/test_ds_be_new_ref-32bit.h5
-./hl/test/test_ds_le.h5
-./hl/test/test_ds_le_new_ref.h5
-./hl/test/test_dset_append.c
-./hl/test/test_file_image.c
-./hl/test/test_h5do_compat.c
-./hl/test/test_image.c
-./hl/test/test_ld.c
-./hl/test/test_ld.h5
-./hl/test/test_lite.c
-./hl/test/test_packet.c
-./hl/test/test_packet_vlen.c
-./hl/test/test_table.c
-./hl/test/test_table_be.h5
-./hl/test/test_table_cray.h5
-./hl/test/test_table_le.h5
-./hl/test/usa.wri
-
-# tools
-./hl/tools/Makefile.am
-./hl/tools/gif2h5/Makefile.am
-./hl/tools/gif2h5/decompress.c
-./hl/tools/gif2h5/gif.h
-./hl/tools/gif2h5/gif2hdf.c
-./hl/tools/gif2h5/gif2mem.c
-./hl/tools/gif2h5/gifread.c
-./hl/tools/gif2h5/hdf2gif.c
-./hl/tools/gif2h5/hdfgifwr.c
-./hl/tools/gif2h5/writehdf.c
-./hl/tools/gif2h5/h52gifgentst.c
-./hl/tools/gif2h5/h52giftest.sh.in
-./hl/tools/gif2h5/testfiles/README
-./hl/tools/gif2h5/testfiles/ex_image2.h5
-./hl/tools/gif2h5/testfiles/image1.gif
-./hl/tools/gif2h5/testfiles/h52giftst.h5
-#
-./hl/tools/h5watch/Makefile.am
-./hl/tools/h5watch/extend_dset.c
-./hl/tools/h5watch/h5watch.c
-./hl/tools/h5watch/h5watchgentest.c
-./hl/tools/h5watch/testh5watch.sh.in
-
-# expected test output from testing h5watch
-#
-./hl/tools/testfiles/w-err-std.ddl
-./hl/tools/testfiles/w-err-cmpd1.err
-./hl/tools/testfiles/w-err-cmpd2.err
-./hl/tools/testfiles/w-err-cmpd3.err
-./hl/tools/testfiles/w-err-cmpd4.err
-./hl/tools/testfiles/w-err-cmpd5.err
-./hl/tools/testfiles/w-err-dset1.err
-./hl/tools/testfiles/w-err-dset2.err
-./hl/tools/testfiles/w-err-dset-nomax.err
-./hl/tools/testfiles/w-err-dset-none.err
-./hl/tools/testfiles/w-err-file.err
-./hl/tools/testfiles/w-err-poll0.ddl
-./hl/tools/testfiles/w-err-poll.ddl
-./hl/tools/testfiles/w-err-width.ddl
-./hl/tools/testfiles/w-ext-cmpd.ddl
-./hl/tools/testfiles/w-ext-cmpd-esc.ddl
-./hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
-./hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
-./hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
-./hl/tools/testfiles/w-ext-cmpd-f1.ddl
-./hl/tools/testfiles/w-ext-cmpd-f2.ddl
-./hl/tools/testfiles/w-ext-cmpd-ff3.ddl
-./hl/tools/testfiles/w-ext-cmpd-label.ddl
-./hl/tools/testfiles/w-ext-cmpd-two.ddl
-./hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
-./hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
-./hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
-./hl/tools/testfiles/w-ext-early.ddl
-./hl/tools/testfiles/w-ext-late.ddl
-./hl/tools/testfiles/w-ext-one-d.ddl
-./hl/tools/testfiles/w-ext-one.ddl
-./hl/tools/testfiles/w-ext-one-simple.ddl
-./hl/tools/testfiles/w-ext-two-d.ddl
-./hl/tools/testfiles/w-ext-two.ddl
-./hl/tools/testfiles/w-ext-two-width.ddl
-./hl/tools/testfiles/w-help1.ddl
-
-# hl fortran
-./hl/fortran/Makefile.am
-./hl/fortran/examples/Makefile.am
-./hl/fortran/examples/run-hlfortran-ex.sh.in
-./hl/fortran/src/hdf5_hl_fortrandll.def.in
-./hl/fortran/src/H5DSfc.c
-./hl/fortran/src/H5DSff.F90
-./hl/fortran/src/H5HL_buildiface.F90
-./hl/fortran/src/H5IMcc.c
-./hl/fortran/src/H5IMcc.h
-./hl/fortran/src/H5IMfc.c
-./hl/fortran/src/H5IMff.F90
-./hl/fortran/src/H5LTf90proto.h
-./hl/fortran/src/H5LTfc.c
-./hl/fortran/src/H5LTff.F90
-./hl/fortran/src/H5TBfc.c
-./hl/fortran/src/H5TBff.F90
-./hl/fortran/src/Makefile.am
-./hl/fortran/test/Makefile.am
-./hl/fortran/test/tstds.F90
-./hl/fortran/test/tstimage.F90
-./hl/fortran/test/tstlite.F90
-./hl/fortran/test/tsttable.F90
-
-# hl c++
-./hl/c++/Makefile.am
-./hl/c++/examples/Makefile.am
-./hl/c++/examples/ptExampleFL.cpp
-./hl/c++/examples/run-hlc++-ex.sh.in
-./hl/c++/src/H5PacketTable.h
-./hl/c++/src/H5PacketTable.cpp
-./hl/c++/src/Makefile.am
-./hl/c++/test/ptableTest.h
-./hl/c++/test/ptableTest.cpp
-./hl/c++/test/Makefile.am
-
-# java
-./java/Makefile.am
-./java/CMakeLists.txt
-
-./java/src/Makefile.am
-./java/src/CMakeLists.txt
-./java/src/jni/Makefile.am
-./java/src/jni/CMakeLists.txt
-./java/src/jni/exceptionImp.c
-./java/src/jni/exceptionImp.h
-./java/src/jni/h5Constants.c
-./java/src/jni/nativeData.c
-./java/src/jni/nativeData.h
-./java/src/jni/h5jni.h
-./java/src/jni/h5util.c
-./java/src/jni/h5util.h
-./java/src/jni/h5Imp.c
-./java/src/jni/h5Imp.h
-./java/src/jni/h5aImp.c
-./java/src/jni/h5aImp.h
-./java/src/jni/h5dImp.c
-./java/src/jni/h5dImp.h
-./java/src/jni/h5eImp.c
-./java/src/jni/h5eImp.h
-./java/src/jni/h5fImp.c
-./java/src/jni/h5fImp.h
-./java/src/jni/h5gImp.c
-./java/src/jni/h5gImp.h
-./java/src/jni/h5iImp.c
-./java/src/jni/h5iImp.h
-./java/src/jni/h5lImp.c
-./java/src/jni/h5lImp.h
-./java/src/jni/h5oImp.c
-./java/src/jni/h5oImp.h
-./java/src/jni/h5pImp.c
-./java/src/jni/h5pImp.h
-./java/src/jni/h5pACPLImp.c
-./java/src/jni/h5pACPLImp.h
-./java/src/jni/h5pDAPLImp.c
-./java/src/jni/h5pDAPLImp.h
-./java/src/jni/h5pDCPLImp.c
-./java/src/jni/h5pDCPLImp.h
-./java/src/jni/h5pDXPLImp.c
-./java/src/jni/h5pDXPLImp.h
-./java/src/jni/h5pFAPLImp.c
-./java/src/jni/h5pFAPLImp.h
-./java/src/jni/h5pFCPLImp.c
-./java/src/jni/h5pFCPLImp.h
-./java/src/jni/h5pGAPLImp.c
-./java/src/jni/h5pGAPLImp.h
-./java/src/jni/h5pGCPLImp.c
-./java/src/jni/h5pGCPLImp.h
-./java/src/jni/h5pLAPLImp.c
-./java/src/jni/h5pLAPLImp.h
-./java/src/jni/h5pLCPLImp.c
-./java/src/jni/h5pLCPLImp.h
-./java/src/jni/h5pOCPLImp.c
-./java/src/jni/h5pOCPLImp.h
-./java/src/jni/h5pOCpyPLImp.c
-./java/src/jni/h5pOCpyPLImp.h
-./java/src/jni/h5pStrCPLImp.c
-./java/src/jni/h5pStrCPLImp.h
-./java/src/jni/h5plImp.c
-./java/src/jni/h5plImp.h
-./java/src/jni/h5rImp.c
-./java/src/jni/h5rImp.h
-./java/src/jni/h5sImp.c
-./java/src/jni/h5sImp.h
-./java/src/jni/h5tImp.c
-./java/src/jni/h5tImp.h
-./java/src/jni/h5vlImp.c
-./java/src/jni/h5vlImp.h
-./java/src/jni/h5zImp.c
-./java/src/jni/h5zImp.h
-
-./java/src/hdf/CMakeLists.txt
-./java/src/hdf/overview.html
-./java/src/hdf/hdf5lib/CMakeLists.txt
-
-./java/src/hdf/hdf5lib/callbacks/Callbacks.java
-./java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java
-./java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java
-./java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5D_append_t.java
-./java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java
-./java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java
-./java/src/hdf/hdf5lib/callbacks/H5L_iterate_opdata_t.java
-./java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java
-./java/src/hdf/hdf5lib/callbacks/H5O_iterate_opdata_t.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java
-./java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java
-./java/src/hdf/hdf5lib/callbacks/package-info.java
-
-./java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5Exception.java
-./java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5IdException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java
-./java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java
-./java/src/hdf/hdf5lib/exceptions/package-info.java
-
-./java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
-./java/src/hdf/hdf5lib/structs/H5A_info_t.java
-./java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
-./java/src/hdf/hdf5lib/structs/H5E_error2_t.java
-./java/src/hdf/hdf5lib/structs/H5F_info2_t.java
-./java/src/hdf/hdf5lib/structs/H5FD_hdfs_fapl_t.java
-./java/src/hdf/hdf5lib/structs/H5FD_ros3_fapl_t.java
-./java/src/hdf/hdf5lib/structs/H5G_info_t.java
-./java/src/hdf/hdf5lib/structs/H5L_info_t.java
-./java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
-./java/src/hdf/hdf5lib/structs/H5O_info_t.java
-./java/src/hdf/hdf5lib/structs/H5O_native_info_t.java
-./java/src/hdf/hdf5lib/structs/H5O_token_t.java
-./java/src/hdf/hdf5lib/structs/package-info.java
-
-./java/src/hdf/hdf5lib/H5.java
-./java/src/hdf/hdf5lib/HDF5Constants.java
-./java/src/hdf/hdf5lib/HDF5GroupInfo.java
-./java/src/hdf/hdf5lib/HDFArray.java
-./java/src/hdf/hdf5lib/HDFNativeData.java
-./java/src/hdf/hdf5lib/package-info.java
-
-./java/examples/Makefile.am
-./java/examples/CMakeLists.txt
-
-./java/examples/intro/Makefile.am
-./java/examples/intro/CMakeLists.txt
-./java/examples/intro/JavaIntroExample.sh.in
-./java/examples/intro/H5_CreateAttribute.java
-./java/examples/intro/H5_CreateDataset.java
-./java/examples/intro/H5_CreateFile.java
-./java/examples/intro/H5_CreateGroup.java
-./java/examples/intro/H5_CreateGroupAbsoluteRelative.java
-./java/examples/intro/H5_CreateGroupDataset.java
-./java/examples/intro/H5_ReadWrite.java
-
-./java/examples/groups/Makefile.am
-./java/examples/groups/CMakeLists.txt
-./java/examples/groups/JavaGroupExample.sh.in
-./java/examples/groups/H5Ex_G_Create.java
-./java/examples/groups/H5Ex_G_Iterate.java
-./java/examples/groups/H5Ex_G_Compact.java
-./java/examples/groups/H5Ex_G_Corder.java
-./java/examples/groups/H5Ex_G_Intermediate.java
-./java/examples/groups/H5Ex_G_Phase.java
-./java/examples/groups/H5Ex_G_Traverse.java
-./java/examples/groups/H5Ex_G_Visit.java
-./java/examples/groups/h5ex_g_iterate.h5
-./java/examples/groups/h5ex_g_visit.h5
-
-./java/examples/datasets/Makefile.am
-./java/examples/datasets/CMakeLists.txt
-./java/examples/datasets/JavaDatasetExample.sh.in
-./java/examples/datasets/H5Ex_D_Alloc.java
-./java/examples/datasets/H5Ex_D_Checksum.java
-./java/examples/datasets/H5Ex_D_Chunk.java
-./java/examples/datasets/H5Ex_D_Compact.java
-./java/examples/datasets/H5Ex_D_External.java
-./java/examples/datasets/H5Ex_D_FillValue.java
-./java/examples/datasets/H5Ex_D_Gzip.java
-./java/examples/datasets/H5Ex_D_Hyperslab.java
-./java/examples/datasets/H5Ex_D_ReadWrite.java
-./java/examples/datasets/H5Ex_D_Shuffle.java
-./java/examples/datasets/H5Ex_D_Szip.java
-./java/examples/datasets/H5Ex_D_UnlimitedAdd.java
-./java/examples/datasets/H5Ex_D_UnlimitedGzip.java
-./java/examples/datasets/H5Ex_D_UnlimitedMod.java
-./java/examples/datasets/H5Ex_D_Nbit.java
-./java/examples/datasets/H5Ex_D_Transform.java
-./java/examples/datasets/H5Ex_D_Sofloat.java
-./java/examples/datasets/H5Ex_D_Soint.java
-
-./java/examples/datatypes/Makefile.am
-./java/examples/datatypes/CMakeLists.txt
-./java/examples/datatypes/JavaDatatypeExample.sh.in
-./java/examples/datatypes/H5Ex_T_Array.java
-./java/examples/datatypes/H5Ex_T_ArrayAttribute.java
-./java/examples/datatypes/H5Ex_T_Bit.java
-./java/examples/datatypes/H5Ex_T_BitAttribute.java
-./java/examples/datatypes/H5Ex_T_Commit.java
-./java/examples/datatypes/H5Ex_T_Compound.java
-./java/examples/datatypes/H5Ex_T_CompoundAttribute.java
-./java/examples/datatypes/H5Ex_T_Float.java
-./java/examples/datatypes/H5Ex_T_FloatAttribute.java
-./java/examples/datatypes/H5Ex_T_Integer.java
-./java/examples/datatypes/H5Ex_T_IntegerAttribute.java
-./java/examples/datatypes/H5Ex_T_ObjectReference.java
-./java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
-./java/examples/datatypes/H5Ex_T_Opaque.java
-./java/examples/datatypes/H5Ex_T_OpaqueAttribute.java
-./java/examples/datatypes/H5Ex_T_String.java
-./java/examples/datatypes/H5Ex_T_StringAttribute.java
-./java/examples/datatypes/H5Ex_T_VLString.java
-
-./java/examples/testfiles/examples.intro.H5_CreateAttribute.txt
-./java/examples/testfiles/examples.intro.H5_CreateDataset.txt
-./java/examples/testfiles/examples.intro.H5_CreateFile.txt
-./java/examples/testfiles/examples.intro.H5_CreateGroup.txt
-./java/examples/testfiles/examples.intro.H5_CreateGroupAbsoluteRelative.txt
-./java/examples/testfiles/examples.intro.H5_CreateGroupDataset.txt
-./java/examples/testfiles/examples.intro.H5_ReadWrite.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Create.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Iterate.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Compact.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Corder.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Intermediate.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Phase.txt
-./java/examples/testfiles/examples.groups.H5Ex_G_Visit.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Alloc.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Checksum.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Chunk.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Compact.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_External.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_FillValue.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Gzip.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Hyperslab.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_ReadWrite.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Shuffle.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Szip.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_UnlimitedAdd.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_UnlimitedGzip.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_UnlimitedMod.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Nbit.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Transform.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt
-./java/examples/testfiles/examples.datasets.H5Ex_D_Soint.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Array.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_ArrayAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Bit.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_BitAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Commit.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Compound.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_CompoundAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Float.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_FloatAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Integer.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_IntegerAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_ObjectReference.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_ObjectReferenceAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_Opaque.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_OpaqueAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_String.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_StringAttribute.txt
-./java/examples/testfiles/examples.datatypes.H5Ex_T_VLString.txt
-
-./java/test/Makefile.am
-./java/test/CMakeLists.txt
-./java/test/junit.sh.in
-./java/test/testfiles/JUnit-TestH5.txt
-./java/test/testfiles/JUnit-TestH5A.txt
-./java/test/testfiles/JUnit-TestH5Arw.txt
-./java/test/testfiles/JUnit-TestH5Dparams.txt
-./java/test/testfiles/JUnit-TestH5D.txt
-./java/test/testfiles/JUnit-TestH5Dplist.txt
-./java/test/testfiles/JUnit-TestH5Drw.txt
-./java/test/testfiles/JUnit-TestH5E.txt
-./java/test/testfiles/JUnit-TestH5Edefault.txt
-./java/test/testfiles/JUnit-TestH5Eparams.txt
-./java/test/testfiles/JUnit-TestH5Eregister.txt
-./java/test/testfiles/JUnit-TestH5Fparams.txt
-./java/test/testfiles/JUnit-TestH5Fbasic.txt
-./java/test/testfiles/JUnit-TestH5F.txt
-./java/test/testfiles/JUnit-TestH5Fswmr.txt
-./java/test/testfiles/JUnit-TestH5Gbasic.txt
-./java/test/testfiles/JUnit-TestH5G.txt
-./java/test/testfiles/JUnit-TestH5Giterate.txt
-./java/test/testfiles/JUnit-TestH5Lparams.txt
-./java/test/testfiles/JUnit-TestH5Lbasic.txt
-./java/test/testfiles/JUnit-TestH5Lcreate.txt
-./java/test/testfiles/JUnit-TestH5Oparams.txt
-./java/test/testfiles/JUnit-TestH5Obasic.txt
-./java/test/testfiles/JUnit-TestH5Ocreate.txt
-./java/test/testfiles/JUnit-TestH5Ocopy.txt
-./java/test/testfiles/JUnit-TestH5P.txt
-./java/test/testfiles/JUnit-TestH5PData.txt
-./java/test/testfiles/JUnit-TestH5Pfapl.txt
-./java/test/testfiles/JUnit-TestH5Pfaplhdfs.txt
-./java/test/testfiles/JUnit-TestH5Pfapls3.txt
-./java/test/testfiles/JUnit-TestH5Plist.txt
-./java/test/testfiles/JUnit-TestH5Pvirtual.txt
-./java/test/testfiles/JUnit-TestH5PL.txt
-./java/test/testfiles/JUnit-TestH5R.txt
-./java/test/testfiles/JUnit-TestH5Rref.txt
-./java/test/testfiles/JUnit-TestH5Sbasic.txt
-./java/test/testfiles/JUnit-TestH5S.txt
-./java/test/testfiles/JUnit-TestH5Tparams.txt
-./java/test/testfiles/JUnit-TestH5Tbasic.txt
-./java/test/testfiles/JUnit-TestH5T.txt
-./java/test/testfiles/JUnit-TestH5VL.txt
-./java/test/testfiles/JUnit-TestH5Z.txt
-./java/test/h5ex_g_iterate.orig
-./java/test/TestH5.java
-./java/test/TestH5A.java
-./java/test/TestH5Arw.java
-./java/test/TestH5Dparams.java
-./java/test/TestH5D.java
-./java/test/TestH5Dplist.java
-./java/test/TestH5Drw.java
-./java/test/TestH5E.java
-./java/test/TestH5Edefault.java
-./java/test/TestH5Eparams.java
-./java/test/TestH5Eregister.java
-./java/test/TestH5Fparams.java
-./java/test/TestH5Fbasic.java
-./java/test/TestH5F.java
-./java/test/TestH5Fswmr.java
-./java/test/TestH5Gbasic.java
-./java/test/TestH5G.java
-./java/test/TestH5Giterate.java
-./java/test/TestH5Lparams.java
-./java/test/TestH5Lbasic.java
-./java/test/TestH5Lcreate.java
-./java/test/TestH5Oparams.java
-./java/test/TestH5Obasic.java
-./java/test/TestH5Ocreate.java
-./java/test/TestH5Ocopy.java
-./java/test/TestH5P.java
-./java/test/TestH5PData.java
-./java/test/TestH5Pfapl.java
-./java/test/TestH5Pfaplhdfs.java
-./java/test/TestH5Pfapls3.java
-./java/test/TestH5Plist.java
-./java/test/TestH5Pvirtual.java
-./java/test/TestH5PL.java
-./java/test/TestH5R.java
-./java/test/TestH5Rref.java
-./java/test/TestH5Sbasic.java
-./java/test/TestH5S.java
-./java/test/TestH5Tparams.java
-./java/test/TestH5Tbasic.java
-./java/test/TestH5T.java
-./java/test/TestH5VL.java
-./java/test/TestH5Z.java
-./java/test/TestAll.java
-
-./java/lib/hamcrest-core.jar
-./java/lib/junit.jar
-./java/lib/simplelogger.properties
-./java/lib/slf4j-api-1.7.25.jar
-./java/lib/ext/slf4j-nop-1.7.25.jar
-./java/lib/ext/slf4j-simple-1.7.25.jar
-
-# CMake-specific Files
-./config/toolchain/build32.cmake
-./config/toolchain/clang.cmake
-./config/toolchain/crayle.cmake
-./config/toolchain/GCC.cmake
-./config/toolchain/intel.cmake
-./config/toolchain/mingw64.cmake
-./config/toolchain/PGI.cmake
-
-./config/cmake/cacheinit.cmake
-./config/cmake/CMakeFindJavaCommon.cmake
-./config/cmake/ConversionTests.c
-./config/cmake/ConfigureChecks.cmake
-./config/cmake/CPack.Info.plist.in
-./config/cmake/CTestCustom.cmake
-./config/cmake/fileCompareTest.cmake
-./config/cmake/FindHDFS.cmake
-./config/cmake/H5pubconf.h.in
-./config/cmake/hdf5-config.cmake.in
-./config/cmake/hdf5-config-version.cmake.in
-./config/cmake/HDFCompilerFlags.cmake
-./config/cmake/HDFCXXCompilerFlags.cmake
-./config/cmake/HDFFortranCompilerFlags.cmake
-./config/cmake/HDF5Macros.cmake
-./config/cmake/HDF5PluginMacros.cmake
-./config/cmake/HDF5PluginCache.cmake
-./config/cmake/HDF5UseFortran.cmake
-./config/cmake/javaTargets.cmake.in
-./config/cmake/jrunTest.cmake
-./config/cmake/libh5cc.in
-./config/cmake/libhdf5.settings.cmake.in
-./config/cmake/mccacheinit.cmake
-./config/cmake/patch.xml
-./config/cmake/PkgInfo.in
-./config/cmake/README.txt.cmake.in
-./config/cmake/UseJava.cmake
-./config/cmake/UseJavaClassFilelist.cmake
-./config/cmake/UseJavaSymlinks.cmake
-./config/cmake/userblockTest.cmake
-./config/cmake/vfdTest.cmake
-./config/cmake/volTest.cmake
-./config/cmake/wait_H5Tinit.cmake
-
-./config/cmake_ext_mod/ConfigureChecks.cmake
-./config/cmake_ext_mod/CTestCustom.cmake
-./config/cmake_ext_mod/FindSZIP.cmake
-./config/cmake_ext_mod/GetTimeOfDayTest.cpp
-./config/cmake_ext_mod/grepTest.cmake
-./config/cmake_ext_mod/hdf.bmp
-./config/cmake_ext_mod/hdf.icns
-./config/cmake_ext_mod/hdf.ico
-./config/cmake_ext_mod/HDFLibMacros.cmake
-./config/cmake_ext_mod/HDFMacros.cmake
-./config/cmake_ext_mod/HDFTests.c
-./config/cmake_ext_mod/HDFUseFortran.cmake
-./config/cmake_ext_mod/NSIS.InstallOptions.ini.in
-./config/cmake_ext_mod/NSIS.template.in
-./config/cmake_ext_mod/runTest.cmake
-./config/cmake_ext_mod/version.plist.in
-
-# CMake-specific User Files
-./config/cmake/UserMacros/Windows_MT.cmake
-
-# CMake-specific Examples Files
-./config/cmake/HDF5_Examples.cmake.in
-./release_docs/USING_CMake_Examples.txt
-
-./CMakeLists.txt
-./CMakeFilters.cmake
-./CMakeInstallation.cmake
-./CMakePlugins.cmake
-./CTestConfig.cmake
-./UserMacros.cmake
-./c++/CMakeLists.txt
-./c++/examples/CMakeLists.txt
-./c++/examples/CMakeTests.cmake
-./c++/src/CMakeLists.txt
-./c++/test/CMakeLists.txt
-./c++/test/CMakeTests.cmake
-./c++/test/CMakeVFDTests.cmake
-./examples/CMakeLists.txt
-./examples/CMakeTests.cmake
-./examples/run-all-ex.sh
-./examples/run-c-ex.sh.in
-./fortran/CMakeLists.txt
-./fortran/examples/CMakeLists.txt
-./fortran/examples/CMakeTests.cmake
-./fortran/src/CMakeLists.txt
-./fortran/test/CMakeLists.txt
-./fortran/test/CMakeTests.cmake
-./fortran/testpar/CMakeLists.txt
-./fortran/testpar/CMakeTests.cmake
-./hl/CMakeLists.txt
-./hl/c++/CMakeLists.txt
-./hl/c++/examples/CMakeLists.txt
-./hl/c++/examples/CMakeTests.cmake
-./hl/c++/src/CMakeLists.txt
-./hl/c++/test/CMakeLists.txt
-./hl/c++/test/CMakeTests.cmake
-./hl/examples/CMakeLists.txt
-./hl/examples/CMakeTests.cmake
-./hl/fortran/CMakeLists.txt
-./hl/fortran/examples/CMakeLists.txt
-./hl/fortran/examples/CMakeTests.cmake
-./hl/fortran/src/CMakeLists.txt
-./hl/fortran/test/CMakeLists.txt
-./hl/fortran/test/CMakeTests.cmake
-./hl/src/CMakeLists.txt
-./hl/test/CMakeLists.txt
-./hl/test/CMakeTests.cmake
-./hl/tools/CMakeLists.txt
-./hl/tools/gif2h5/CMakeLists.txt
-./hl/tools/gif2h5/CMakeTests.cmake
-./hl/tools/h5watch/CMakeLists.txt
-./hl/tools/h5watch/CMakeTests.cmake
-./src/CMakeLists.txt
-./test/CMakeLists.txt
-./test/CMakePassthroughVOLTests.cmake
-./test/CMakeTests.cmake
-./test/CMakeVFDTests.cmake
-./test/flushrefreshTest.cmake
-./test/ShellTests.cmake
-./testpar/CMakeLists.txt
-./testpar/CMakeTests.cmake
-./testpar/CMakeVFDTests.cmake
-./tools/CMakeLists.txt
-./tools/lib/CMakeLists.txt
-./tools/libtest/CMakeLists.txt
-./tools/libtest/CMakeTests.cmake
-./tools/src/CMakeLists.txt
-./tools/test/CMakeLists.txt
-./tools/src/h5copy/CMakeLists.txt
-./tools/test/h5copy/CMakeLists.txt
-./tools/test/h5copy/CMakeTests.cmake
-./tools/src/h5diff/CMakeLists.txt
-./tools/test/h5diff/CMakeLists.txt
-./tools/test/h5diff/CMakeTests.cmake
-./tools/src/h5dump/CMakeLists.txt
-./tools/test/h5dump/CMakeLists.txt
-./tools/test/h5dump/CMakeTests.cmake
-./tools/test/h5dump/CMakeTestsPBITS.cmake
-./tools/test/h5dump/CMakeTestsXML.cmake
-./tools/test/h5dump/CMakeTestsVDS.cmake
-./tools/test/h5dump/CMakeVFDTests.cmake
-./tools/src/h5format_convert/CMakeLists.txt
-./tools/test/h5format_convert/CMakeLists.txt
-./tools/test/h5format_convert/CMakeTests.cmake
-./tools/src/h5import/CMakeLists.txt
-./tools/test/h5import/CMakeLists.txt
-./tools/test/h5import/CMakeTests.cmake
-./tools/src/h5jam/CMakeLists.txt
-./tools/test/h5jam/CMakeLists.txt
-./tools/test/h5jam/CMakeTests.cmake
-./tools/src/h5ls/CMakeLists.txt
-./tools/test/h5ls/CMakeLists.txt
-./tools/test/h5ls/CMakeTests.cmake
-./tools/test/h5ls/CMakeTestsVDS.cmake
-./tools/src/h5repack/CMakeLists.txt
-./tools/test/h5repack/CMakeLists.txt
-./tools/test/h5repack/CMakeTests.cmake
-./tools/test/h5repack/CMakeVFDTests.cmake
-./tools/src/h5stat/CMakeLists.txt
-./tools/test/h5stat/CMakeLists.txt
-./tools/test/h5stat/CMakeTests.cmake
-./tools/src/misc/CMakeLists.txt
-./tools/src/h5perf/CMakeLists.txt
-./tools/test/misc/CMakeLists.txt
-./tools/test/misc/CMakeTestsClear.cmake
-./tools/test/misc/CMakeTestsMkgrp.cmake
-./tools/test/misc/CMakeTestsRepart.cmake
-./tools/test/misc/vds/CMakeLists.txt
-./tools/test/perform/CMakeLists.txt
-./tools/test/perform/CMakeTests.cmake
-
-./utils/CMakeLists.txt
-./utils/test/CMakeLists.txt
-./utils/mirror_vfd/CMakeLists.txt
-
-# CMake-specific User Scripts
-./config/cmake/CTestScript.cmake
-./config/cmake/HDF5_Examples_options.cmake
-./config/cmake/scripts/CTestScript.cmake
-./config/cmake/scripts/HDF5config.cmake
-./config/cmake/scripts/HDF5options.cmake
-
-# CMake-specific Sanitizer Scripts
-./config/sanitizer/code-coverage.cmake
-./config/sanitizer/formatting.cmake
-./config/sanitizer/sanitizers.cmake
-./config/sanitizer/tools.cmake
-./config/sanitizer/LICENSE
-./config/sanitizer/README.md
-
-# CMake-specific HPC Scripts
-./config/cmake/scripts/HPC/sbatch-HDF5options.cmake
-./config/cmake/scripts/HPC/bsub-HDF5options.cmake
-./config/cmake/scripts/HPC/qsub-HDF5options.cmake
-./config/cmake/scripts/HPC/raybsub-HDF5options.cmake
-
-# Files generated by autogen
-./aclocal.m4
-./bin/compile
-./bin/config.guess
-./bin/config.sub
-./bin/depcomp
-./bin/install-sh
-./bin/ltmain.sh
-./bin/missing
-./bin/test-driver
-./c++/examples/Makefile.in
-./c++/Makefile.in
-./configure
-./bin/Makefile.in
-./c++/src/Makefile.in
-./c++/test/Makefile.in
-./examples/Makefile.in
-./fortran/examples/Makefile.in
-./fortran/Makefile.in
-./fortran/src/Makefile.in
-./fortran/test/Makefile.in
-./fortran/testpar/Makefile.in
-./hl/c++/examples/Makefile.in
-./hl/c++/Makefile.in
-./hl/c++/src/Makefile.in
-./hl/c++/test/Makefile.in
-./hl/examples/Makefile.in
-./hl/fortran/examples/Makefile.in
-./hl/fortran/Makefile.in
-./hl/fortran/src/Makefile.in
-./hl/fortran/test/Makefile.in
-./hl/Makefile.in
-./hl/src/Makefile.in
-./hl/test/Makefile.in
-./hl/tools/gif2h5/Makefile.in
-./hl/tools/h5watch/Makefile.in
-./hl/tools/Makefile.in
-./java/examples/intro/Makefile.in
-./java/examples/datasets/Makefile.in
-./java/examples/datatypes/Makefile.in
-./java/examples/Makefile.in
-./java/examples/groups/Makefile.in
-./java/Makefile.in
-./java/test/Makefile.in
-./java/src/Makefile.in
-./java/src/jni/Makefile.in
-./m4/libtool.m4
-./m4/lt~obsolete.m4
-./m4/ltoptions.m4
-./m4/ltsugar.m4
-./m4/ltversion.m4
-./Makefile.in
-./src/H5config.h.in
-./src/H5Edefin.h
-./src/H5Einit.h
-./src/H5Epubgen.h
-./src/H5Eterm.h
-./src/H5overflow.h
-./src/H5version.h
-./src/Makefile.in
-./test/Makefile.in
-./testpar/Makefile.in
-./tools/Makefile.in
-./tools/lib/Makefile.in
-./tools/libtest/Makefile.in
-./tools/src/Makefile.in
-./tools/src/h5copy/Makefile.in
-./tools/src/h5diff/Makefile.in
-./tools/src/h5dump/Makefile.in
-./tools/src/h5format_convert/Makefile.in
-./tools/src/h5import/Makefile.in
-./tools/src/h5jam/Makefile.in
-./tools/src/h5ls/Makefile.in
-./tools/src/h5perf/Makefile.in
-./tools/src/h5repack/Makefile.in
-./tools/src/h5stat/Makefile.in
-./tools/src/misc/Makefile.in
-./tools/test/Makefile.in
-./tools/test/h5copy/Makefile.in
-./tools/test/h5diff/Makefile.in
-./tools/test/h5dump/Makefile.in
-./tools/test/h5format_convert/Makefile.in
-./tools/test/h5import/Makefile.in
-./tools/test/h5jam/Makefile.in
-./tools/test/h5ls/Makefile.in
-./tools/test/h5repack/Makefile.in
-./tools/test/h5stat/Makefile.in
-./tools/test/misc/Makefile.in
-./tools/test/misc/vds/Makefile.in
-./tools/test/perform/Makefile.in
-./utils/Makefile.in
-./utils/mirror_vfd/Makefile.in
-./utils/test/Makefile.in
diff --git a/README.txt b/README.md
index b9cfe8a..8f770ed 100644
--- a/README.txt
+++ b/README.md
@@ -1,8 +1,8 @@
-HDF5 version 1.13.1-1 currently under development
+HDF5 version 1.13.2-1 currently under development
-------------------------------------------------------------------------------
-Please refer to the release_docs/INSTALL file for installation instructions.
-------------------------------------------------------------------------------
+![HDF5 Logo](doxygen/img/HDF5.png)
+
+*Please refer to the release_docs/INSTALL file for installation instructions.*
THE HDF GROUP
---------------
@@ -13,17 +13,19 @@ de facto standard in scientific and research communities.
More information about The HDF Group, the HDF5 Community and the HDF5 software
project, tools and services can be found at the Group's website.
-
- https://www.hdfgroup.org/
+
+ https://www.hdfgroup.org/
DOCUMENTATION
-------------
This release is fully functional for the API described in the documentation.
- https://portal.hdfgroup.org/display/HDF5/The+HDF5+API
+
+ https://portal.hdfgroup.org/display/HDF5/The+HDF5+API
Full Documentation and Programming Resources for this release can be found at
- https://portal.hdfgroup.org/display/HDF5
+
+ https://portal.hdfgroup.org/display/HDF5
See the RELEASE.txt file in the release_docs/ directory for information specific
to the features and updates included in this release of the library.
@@ -42,7 +44,7 @@ HELP AND SUPPORT
----------------
Information regarding Help Desk and Support services is available at
- https://portal.hdfgroup.org/display/support/The+HDF+Help+Desk
+ https://portal.hdfgroup.org/display/support/The+HDF+Help+Desk
@@ -51,14 +53,14 @@ FORUM and NEWS
The following public forums are provided for public announcements and discussions
of interest to the general HDF5 Community.
- Homepage of the Forum
- https://forum.hdfgroup.org
+ - Homepage of the Forum
+ https://forum.hdfgroup.org
- News and Announcement
- https://forum.hdfgroup.org/c/news-and-announcements-from-the-hdf-group
+ - News and Announcement
+ https://forum.hdfgroup.org/c/news-and-announcements-from-the-hdf-group
- HDF5 and HDF4 Topics
- https://forum.hdfgroup.org/c/hdf5
+ - HDF5 and HDF4 Topics
+ https://forum.hdfgroup.org/c/hdf5
These forums are provided as an open and public service for searching and reading.
Posting requires completing a simple registration and allows one to join in the
@@ -67,14 +69,37 @@ use and configuration
https://forum.hdfgroup.org/t/quickstart-guide-welcome-to-the-new-hdf-forum
+RELEASE SCHEDULE
+----------------
+
+![HDF5 release schedule](doc/img/release-schedule.png)
+
+HDF5 does not release on a regular schedule. Instead, releases are driven by
+new features and bug fixes, though we try to have at least one release of each
+maintenance branch per year. Future HDF5 releases indicated on this schedule
+are tentative.
+
+| Release | New Features |
+| ------- | ------------ |
+| 1.13.2 | VFD SWMR, Onion VFD, Selection I/O |
+| 1.13.3 | Multi-Dataset I/O |
+| 1.13.4 | Subfiling |
+
+This list of feature release versions is also tentative, and the specific release
+in which a feature is introduced may change.
+
+
SNAPSHOTS, PREVIOUS RELEASES AND SOURCE CODE
--------------------------------------------
Periodically development code snapshots are provided at the following URL:
- https://gamma.hdfgroup.org/ftp/pub/outgoing/hdf5/snapshots/
+
+ https://gamma.hdfgroup.org/ftp/pub/outgoing/hdf5/snapshots/
Source packages for current and previous releases are located at:
- https://portal.hdfgroup.org/display/support/Downloads
+
+ https://portal.hdfgroup.org/display/support/Downloads
Development code is available at our Github location:
- https://github.com/HDFGroup/hdf5.git
+
+ https://github.com/HDFGroup/hdf5.git
diff --git a/bin/README b/bin/README
deleted file mode 100644
index 1c77043..0000000
--- a/bin/README
+++ /dev/null
@@ -1,2 +0,0 @@
-The daily tests run copies of some of the scripts in this directory from another repository, notably snapshot and runtest. The copies in this directory should work, but are not used in daily tests, though they should be tested occasionally.
-
diff --git a/bin/bbrelease b/bin/bbrelease
index cdb8678..d056f6d 100755
--- a/bin/bbrelease
+++ b/bin/bbrelease
@@ -14,32 +14,13 @@
# Make a release of hdf5.
#
-# Programmer: Robb Matzke
-# Creation date: on or before 1998-01-29.
+# NOTE:
+# This script differs from bin/release in that this has an added
+# --revision option to create private releases with the code revision
+# hash in the version strings.
#
-# Modifications
-# Robb Matzke, 1999-07-16
-# The SunOS 5.6 sed *must* have slashes as delimiters. I changed things like
-# `sed s+/CVS++' to `sed 's/\/CVS//'
-#
-# Albert Cheng, 1999-10-26
-# Moved the MANIFEST checking to a separate command file so that
-# it can be invoked individually.
-#
-# Albert Cheng, 2004-08-14
-# Added the --private option.
-#
-# James Laird, 2005-09-07
-# Added the md5 method.
-#
-# Larry Knox, 2016-08-30
-# Added the --revision option to create private releases with the
-# code revision hash in the version strings. Currently the version
-# of this script with the --revision option is named bbrelease. It
-# can probably be merged into the original release script in the
-# future.
-# Commands to get the revision hash have now been converted to git
-# to match the source repository change.
+# This script can probably be merged into the original release script in
+# the future.
# Function definitions
#
@@ -47,26 +28,25 @@
USAGE()
{
cat << EOF
-Usage: $0 -d <dir> [--docver BRANCHNAME] [-h] [--nocheck] [--private] <methods> ...
- -d DIR The name of the directory where the release(s) should be
+Usage: $0 -d <dir> [--docver BRANCHNAME] [-h] [--private] [--revision] <methods> ...
+ -d DIR The name of the directory where the release(s) should be
placed.
--docver BRANCHNAME This is added for 1.8 and beyond to get the correct
version of documentation files from the hdf5docs
repository. BRANCHNAME for v1.8 should be hdf5_1_8.
-h print the help page.
- --nocheck Ignore errors in MANIFEST file.
- --private Make a private release with today's date in version information.
- --revision Make a private release with the code revision number in version information.
-
+ --private Make a private release with today's date in version information.
+ --revision Make a private release with the code revision number in version information.
+
This must be run at the top level of the source directory.
The other command-line options are the names of the programs to use
for compressing the resulting tar archive (if none are given then
"tar" is assumed):
- tar -- use tar and don't do any compressing.
- gzip -- use gzip with "-9" and append ".gz" to the output name.
+ tar -- use tar and don't do any compressing.
+ gzip -- use gzip with "-9" and append ".gz" to the output name.
bzip2 -- use bzip2 with "-9" and append ".bz2" to the output name.
- zip -- convert all text files to DOS style and form a zip file for Windows use.
+ zip -- convert all text files to DOS style and form a zip file for Windows use.
doc -- produce the latest doc tree in addition to the archive.
An md5 checksum is produced for each archive created and stored in the md5 file.
@@ -97,15 +77,10 @@ EOF
# Function name: tar2zip
# Convert the release tarball to a Windows zipball.
#
-# Programmer: Albert Cheng
-# Creation date: 2014-04-23
-#
-# Modifications
-#
# Steps:
# 1. untar the tarball in a temporary directory;
# Note: do this in a temporary directory to avoid changing
-# the original source directory which maybe around.
+# the original source directory which may be around.
# 2. convert all its text files to DOS (LF-CR) style;
# 3. form a zip file which is usable by Windows users.
#
@@ -119,8 +94,8 @@ EOF
tar2zip()
{
if [ $# -ne 3 ]; then
- echo "usage: tar2zip <tarfilename> <zipfilename>"
- return 1
+ echo "usage: tar2zip <tarfilename> <zipfilename>"
+ return 1
fi
ztmpdir=/tmp/tmpdir$$
mkdir -p $ztmpdir
@@ -132,23 +107,23 @@ tar2zip()
(cd $ztmpdir; tar xf -) < $tarfile
# sanity check
if [ ! -d $ztmpdir/$version ]; then
- echo "untar did not create $ztmpdir/$version source dir"
- # cleanup
- rm -rf $ztmpdir
- return 1
+ echo "untar did not create $ztmpdir/$version source dir"
+ # cleanup
+ rm -rf $ztmpdir
+ return 1
fi
# step 2: convert text files
# There maybe a simpler way to do this.
# options used in unix2dos:
- # -k Keep the date stamp
+ # -k Keep the date stamp
# -q quiet mode
# grep redirect output to /dev/null because -q or -s are not portable.
find $ztmpdir/$version | \
- while read inf; do \
- if file $inf | grep "$inf\: .*text" > /dev/null 2>&1 ; then \
- unix2dos -q -k $inf; \
- fi\
- done
+ while read inf; do \
+ if file $inf | grep "$inf\: .*text" > /dev/null 2>&1 ; then \
+ unix2dos -q -k $inf; \
+ fi\
+ done
# step 3: make zipball
# -9 maximum compression
# -y Store symbolic links as such in the zip archive
@@ -163,14 +138,6 @@ tar2zip()
# This command must be run at the top level of the hdf5 source directory.
# Verify this requirement.
-# Since we are running bbrelease to create an HDF5 source tarfile for buildbot
-# testing with source that is not for release, there is not a file named
-# "configure" but there will be one named "configure.ac". The "configure"
-# file will be created when autogen.sh runs. There probably will always
-# be a bin/release file, but just in case it is removed, we can check for
-# this script, bbrelease, in the bin directory. The bin/release script should
-# continue to check for "configure" because it should be present in release
-# source.
if [ ! \( -f configure.ac -a -f bin/bbrelease \) ]; then
echo "$0 must be run at the top level of the hdf5 source directory"
exit 1
@@ -182,12 +149,11 @@ VERS=`perl bin/h5vers`
VERS_OLD=
test "$VERS" || exit 1
verbose=yes
-check=yes
release_date=`date +%F`
today=`date +%Y%m%d`
pmode='no'
revmode='no'
-tmpdir="../#release_tmp.$$" # tmp work directory
+tmpdir="../#release_tmp.$$" # tmp work directory
DOC_URL=https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5doc.git
CPPLUS_RM_NAME=cpplus_RM
@@ -195,11 +161,11 @@ CPPLUS_RM_NAME=cpplus_RM
RESTORE_VERSION()
{
if [ X-${VERS_OLD} != X- ]; then
- echo restoring version information back to $VERS_OLD
- rm -f config/lt_vers.am
- cp $tmpdir/lt_vers.am config/lt_vers.am
- bin/h5vers -s $VERS_OLD
- VERS_OLD=
+ echo restoring version information back to $VERS_OLD
+ rm -f config/lt_vers.am
+ cp $tmpdir/lt_vers.am config/lt_vers.am
+ bin/h5vers -s $VERS_OLD
+ VERS_OLD=
fi
}
@@ -209,20 +175,17 @@ while [ -n "$1" ]; do
arg=$1
shift
case "$arg" in
- -d)
- DEST=$1
- shift
- ;;
- --nocheck)
- check=no
- ;;
- -h)
- USAGE
- exit 0
- ;;
- --private)
- pmode=yes
- ;;
+ -d)
+ DEST=$1
+ shift
+ ;;
+ -h)
+ USAGE
+ exit 0
+ ;;
+ --private)
+ pmode=yes
+ ;;
--revision)
revmode=yes
;;
@@ -230,14 +193,14 @@ while [ -n "$1" ]; do
DOCVERSION=$1
shift
;;
- -*)
- echo "Unknown switch: $arg" 1>&2
- USAGE
- exit 1
- ;;
- *)
- methods="$methods $arg"
- ;;
+ -*)
+ echo "Unknown switch: $arg" 1>&2
+ USAGE
+ exit 1
+ ;;
+ *)
+ methods="$methods $arg"
+ ;;
esac
done
@@ -278,7 +241,7 @@ if [ X$revmode = Xyes ]; then
cp config/lt_vers.am $tmpdir
branch=`git branch | grep '*' | awk '{print $NF}'`
revision=`git rev-parse --short HEAD`
-# Set version information to m.n.r-r$revision.
+ # Set version information to m.n.r-r$revision.
# (h5vers does not correctly handle just m.n.r-$today.)
VERS=`echo $VERS | sed -e s/-.*//`-$revision
echo Private release of $VERS
@@ -299,30 +262,17 @@ if [ ! -d $DEST ]; then
exit 1
fi
-# Check the validity of the MANIFEST file.
-bin/chkmanifest || fail=yes
-if [ "X$fail" = "Xyes" ]; then
- if [ $check = yes ]; then
- exit 1
- else
- echo "Continuing anyway..."
- fi
-fi
-
-# Create a manifest that contains only files for distribution.
-MANIFEST=$tmpdir/H5_MANIFEST
-grep '^\.' MANIFEST | grep -v _DO_NOT_DISTRIBUTE_ >$MANIFEST
-
-# Prepare the source tree for a release.
-#ln -s `pwd` $tmpdir/$HDF5_VERS || exit 1
+# Create a symlink to the source so files in the tarball have the prefix
+# we want (gnu's --transform isn't portable)
ln -s `pwd` $tmpdir/$HDF5_IN_VERS || exit 1
+
# Save a backup copy of Makefile if exists.
test -f Makefile && mv Makefile $tmpdir/Makefile.x
cp -p Makefile.dist Makefile
-# Update README.txt and release_docs/RELEASE.txt with release information in
+# Update README.md and release_docs/RELEASE.txt with release information in
# line 1.
-for f in README.txt release_docs/RELEASE.txt; do
+for f in README.md release_docs/RELEASE.txt; do
echo "HDF5 version $VERS released on $release_date" >$f.x
sed -e 1d $f >>$f.x
mv $f.x $f
@@ -330,64 +280,60 @@ for f in README.txt release_docs/RELEASE.txt; do
chmod 644 $f
done
-# trunk is different than branches.
+# develop is different than branches.
if [ "${DOCVERSION}" ]; then
DOC_URL=https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5doc.git -b ${DOCVERSION}
fi
# Create the tar file
test "$verbose" && echo " Running tar..." 1>&2
-( \
- cd $tmpdir; \
- tar cf $HDF5_VERS.tar $HDF5_IN_VERS/Makefile \
- `sed 's/^\.\//'$HDF5_IN_VERS'\//' $MANIFEST` || exit 1 \
-)
+(cd "$tmpdir" && exec tar -ch --exclude-vcs -f "$HDF5_VERS.tar" "./$HDF5_IN_VERS" || exit 1 )
# Compress
MD5file=$HDF5_VERS.md5
cp /dev/null $DEST/$MD5file
for comp in $methods; do
case $comp in
- tar)
- cp -p $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.tar
- (cd $DEST; md5sum $HDF5_VERS.tar >> $MD5file)
- ;;
- gzip)
- test "$verbose" && echo " Running gzip..." 1>&2
- gzip -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.gz
- (cd $DEST; md5sum $HDF5_VERS.tar.gz >> $MD5file)
- ;;
- bzip2)
- test "$verbose" && echo " Running bzip2..." 1>&2
- bzip2 -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.bz2
- (cd $DEST; md5sum $HDF5_VERS.tar.bz2 >> $MD5file)
- ;;
- zip)
- test "$verbose" && echo " Creating zip ball..." 1>&2
- tar2zip $HDF5_IN_VERS $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.zip 1>&2
- (cd $DEST; md5sum $HDF5_VERS.zip >> $MD5file)
- ;;
- doc)
+ tar)
+ cp -p $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.tar
+ (cd $DEST; md5sum $HDF5_VERS.tar >> $MD5file)
+ ;;
+ gzip)
+ test "$verbose" && echo " Running gzip..." 1>&2
+ gzip -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.gz
+ (cd $DEST; md5sum $HDF5_VERS.tar.gz >> $MD5file)
+ ;;
+ bzip2)
+ test "$verbose" && echo " Running bzip2..." 1>&2
+ bzip2 -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.bz2
+ (cd $DEST; md5sum $HDF5_VERS.tar.bz2 >> $MD5file)
+ ;;
+ zip)
+ test "$verbose" && echo " Creating zip ball..." 1>&2
+ tar2zip $HDF5_IN_VERS $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.zip 1>&2
+ (cd $DEST; md5sum $HDF5_VERS.zip >> $MD5file)
+ ;;
+ doc)
if [ "${DOCVERSION}" = "" ]; then
DOCVERSION=master
fi
- test "$verbose" && echo " Creating docs..." 1>&2
- # Check out docs from git repo
- (cd $tmpdir; git clone $DOC_URL > /dev/null) || exit 1
- # Create doxygen C++ RM
- (cd c++/src && doxygen cpp_doc_config > /dev/null ) || exit 1
- # Replace version of C++ RM with just-created version
+ test "$verbose" && echo " Creating docs..." 1>&2
+ # Check out docs from git repo
+ (cd $tmpdir; git clone $DOC_URL > /dev/null) || exit 1
+ # Create doxygen C++ RM
+ (cd c++/src && doxygen cpp_doc_config > /dev/null ) || exit 1
+ # Replace version of C++ RM with just-created version
rm -rf $tmpdir/${DOCVERSION}/html/$CPPLUS_RM_NAME
mv c++/src/$CPPLUS_RM_NAME $tmpdir/${DOCVERSION}/html/$CPPLUS_RM_NAME
# Compress the docs and move them to the release area
mv $tmpdir/$DOCVERSION $tmpdir/${HDF5_VERS}_docs
- (cd $tmpdir && tar cf ${HDF5_VERS}_docs.tar ${HDF5_VERS}_docs)
- mv $tmpdir/${HDF5_VERS}_docs.tar $DEST
- ;;
- *)
- echo "***Error*** Unknown method $comp"
- exit 1
- ;;
+ (cd $tmpdir && tar cf ${HDF5_VERS}_docs.tar ${HDF5_VERS}_docs)
+ mv $tmpdir/${HDF5_VERS}_docs.tar $DEST
+ ;;
+ *)
+ echo "***Error*** Unknown method $comp"
+ exit 1
+ ;;
esac
done
@@ -408,4 +354,6 @@ fi
# Remove temporary things
rm -rf $tmpdir
+echo "DONE"
+
exit 0
diff --git a/bin/checkposix b/bin/checkposix
index 36d07a8..bca259d 100755
--- a/bin/checkposix
+++ b/bin/checkposix
@@ -115,7 +115,7 @@ foreach $arg (@ARGV) {
# These are really HDF5 functions/macros even though they don't
# start with `h' or `H'.
- next if $name =~ /^FUNC_(ENTER|LEAVE)(_(NO)?API|_PACKAGE|_STATIC)?(_NOFS|_NOCLEAR|_NOINIT|_NOPUSH)?(_NOFUNC|_TAG)?$/;
+ next if $name =~ /^FUNC_(ENTER|LEAVE)(_(NO)?API|_PACKAGE|_STATIC)?(_NAMECHECK_ONLY|_NOFS|_NOCLEAR|_NOINIT|_NOPUSH)?(_NOFUNC|_TAG)?$/;
next if $name =~ /^(BEGIN|END)_FUNC$/;
next if $name =~ /^U?INT(8|16|32|64)(ENCODE|DECODE)(_VAR)?$/;
next if $name =~ /^CI_(PRINT_STATS|INC_SRC|INC_DST)$/;
@@ -123,10 +123,13 @@ foreach $arg (@ARGV) {
next if $name =~ /^(MIN3?|MAX3?|NELMTS|POWER_OF_TWO|REGION_OVERFLOW)$/;
next if $name =~ /^(SIZE_OVERFLOW|UNIQUE_MEMBERS|S_ISDIR)$/;
next if $name =~ /^addr_defined$/;
+ next if $name =~ /^TERMINATOR$/;
- # These functions/macros are exempt.
- # op, cb, and OP are often spuriously flagged so ignore them.
- next if $name =~ /^(main|op|cb|OP)$/;
+ # Ignore callback invocation
+ next if $name =~ /^(op|cb|OP|iter_op|func)$/;
+
+ # Ignore main
+ next if $name =~ /^main$/;
# This often appears in preprocessor lines that span multiple lines
next if $name =~ /^(defined)$/;
diff --git a/bin/chkmanifest b/bin/chkmanifest
deleted file mode 100755
index 08ca1fa..0000000
--- a/bin/chkmanifest
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/bin/sh
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-
-# Check that all the files in MANIFEST exist and (if this is a
-# GIT checkout) that all the GIT-managed files appear in the
-# MANIFEST.
-#
-
-verbose=yes
-MANIFEST=/tmp/HD_MANIFEST.$$
-AUTOGEN=./autogen.sh
-AUTOGEN_LOG=/tmp/autogen.log.$$
-
-# Main
-test "$verbose" && echo " Checking MANIFEST..." 1>&2
-# clean up $MANIFEST file when exits
-trap "rm -f $MANIFEST" 0
-
-# Only split lines on newline, not whitespace
-set -f
-IFS='
-'
-
-# First make sure i am in the directory in which there is an MANIFEST file
-# and then do the checking from there. Will try the following,
-# current directory, parent directory, the directory this command resides.
-if [ -f MANIFEST ]; then
- continue
-elif [ -f ../MANIFEST ]; then
- cd ..
-else
- commanddir=`dirname $0`
- if [ -d "$commanddir" -a -f $commanddir/MANIFEST ]; then
- cd $commanddir
- continue
- else
- echo MANIFEST file not found. Abort.
- exit 1
- fi
-fi
-
-# Do an autogen if generated files (e.g., configure) is not present
-if [ ! -f configure ]; then
- echo " running $AUTOGEN"
- $AUTOGEN > $AUTOGEN_LOG 2>&1
- if [ $? -ne 0 ]; then
- echo $AUTOGEN encountered error. Abort.
- echo output from $AUTOGEN:
- cat $AUTOGEN_LOG
- exit 1
- fi
- rm $AUTOGEN_LOG
-fi
-
-# Check for duplicate entries. This can be done at any time, but it may as
-# well be sooner so that if something else fails the presence of duplicates
-# will already be known.
-errcode=0
-DUPLICATES=`perl -ne 's/#.*//; next if /^\s*$/; if ($uniq{$_}++) { print $_; }' MANIFEST`
-if [ "$DUPLICATES" ]; then
- cat 1>&2 <<EOF
-These entries appear more than once in the MANIFEST:
-$DUPLICATES
-Please remove the duplicate lines and try again.
-
-EOF
-errcode=1
-fi
-
-# Copy the manifest file to get a list of file names.
-grep '^\.' MANIFEST | expand | cut -f1 -d' ' >$MANIFEST
-
-for file in `cat $MANIFEST`; do
- if [ ! -f $file ]; then
- echo "- $file"
- fail=yes
- fi
-done
-
-# Get the list of files under version control and check that they are
-# present.
-#
-# First get a list of all the pending files with git status and
-# check those.
-git_stat=`git status -s`
-for file in $git_stat; do
-
- # Newly added files are not listed by git ls-files, which
- # we check later.
-
- # The line listing new files starts with 'A'.
- letter=`echo $file | head -c 1`
- if [ "$letter" = "A" ]; then
- # Convert the git status columns to './' so it matches
- # the manifest file name.
- #
- # There is a space between the status columns and file name, hence
- # the '3'.
- path=`echo $file | sed 's/^.\{3\}/\.\//g'`
- # Ignore directories
- if [ ! -d $path ]; then
- if (grep ^$path$ $MANIFEST >/dev/null); then
- :
- else
- echo "- $path"
- fail=yes
- fi
- fi
- fi
-done
-
-# Next check git ls-files, which gets a list of all files that are
-# checked in.
-git_ls=`git ls-files`
-for file in $git_ls; do
- path="./${file}"
- # Ignore directories
- if [ ! -d $path ]; then
- if (grep ^$path$ $MANIFEST >/dev/null); then
- :
- else
- echo "+ $path"
- fail=yes
- fi
- fi
-done
-
-# Finish up
-if [ "X$fail" = "Xyes" ]; then
- cat 1>&2 <<EOF
-The MANIFEST is out of date. Files marked with a minus sign (-) no
-longer exist; files marked with a plus sign (+) are GIT-managed but do
-not appear in the MANIFEST. Please remedy the situation and try again.
-EOF
- exit 1
-fi
-
-if [ $errcode -ne 0 ]; then
- exit 1
-fi
-
-test "$verbose" && echo " The MANIFEST is up to date." 1>&2
-exit 0
diff --git a/bin/h5vers b/bin/h5vers
index 8f75df1..04d5d03 100755
--- a/bin/h5vers
+++ b/bin/h5vers
@@ -66,7 +66,7 @@ use strict;
# ./H5public.h or ./src/H5public.h.
#
# If the version number is changed (either `-s' or `-i' was used on
-# the command line) then the first line of the README.txt and RELEASE.txt files
+# the command line) then the version line of the README.md and RELEASE.txt files
# one directory above the H5public.h file is also modified so it looks
# something like: This is hdf5-1.2.3-pre1 currently under development.
# The AC_INIT macro in configure.ac will also change in this case to be
@@ -156,10 +156,10 @@ while ($_ = shift) {
}
die "mutually exclusive options given\n" if $set && $inc;
-# Determine file to use as H5public.h, README.txt,
+# Determine file to use as H5public.h, README.md,
# release_docs/RELEASE.txt, configure.ac, windows/src/H5pubconf.h
# config/lt_vers.am and config/cmake/scripts/HDF5config.cmake.
-# The README.txt, release_docs/RELEASE.txt, configure.ac,
+# The README.md, release_docs/RELEASE.txt, configure.ac,
# windows/src/H5pubconf.h, config/lt_vers.am and
# config/cmake/scripts/HDF5config.cmake
# files are always in the directory above H5public.h
@@ -178,9 +178,9 @@ die "unable to read file: $LT_VERS\n" unless -r $file;
my $HDF5CONFIGCMAKE = $file;
$HDF5CONFIGCMAKE =~ s/[^\/]*$/..\/config\/cmake\/scripts\/HDF5config.cmake/;
die "unable to read file: $HDF5CONFIGCMAKE\n" unless -r $file;
-# README.txt
+# README.md
my $README = $file;
-$README =~ s/[^\/]*$/..\/README.txt/;
+$README =~ s/[^\/]*$/..\/README.md/;
die "unable to read file: $README\n" unless -r $file;
# release_docs/RELEASE.txt
my $RELEASE = $file;
@@ -303,7 +303,7 @@ if ($LT_VERS && $version_increased) {
# close FILE;
}
-# Update the README.txt file
+# Update the README.md file
if ($README) {
open FILE, $README or die "$README: $!\n";
my @contents = <FILE>;
diff --git a/bin/locate_sw b/bin/locate_sw
deleted file mode 100755
index 1cf84e2..0000000
--- a/bin/locate_sw
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/bin/sh
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-
-# Try to locate the software as named in argument.
-# This is a sequential search of all possible locations of the software.
-# Usage: locate_sw <SW-Name>
-# It prints a string showing the paths leading to the include, lib and bin
-# directory of the software, separated by colons. E.g., if the software is
-# located in /usr/sdt/*, it prints
-# /usr/sdt/include:/usr/sdt/lib:/usr/sdt/bin
-# Any component that is not found will be returned as an empty string. E.g.,
-# if somehow the header files of the software are not found, it prints
-# :/usr/sdt/lib;/usr/sdt/bin
-
-# Function definitions
-USAGE()
-{
- echo "Usage: locate_sw <SW-Name>"
- echo " where <SW-Name> can be hdf4, hdf5, zlib"
- echo " It prints the paths leading the header files (include),"
- echo " library (lib), and tools (bin). E.g.,"
- echo " /usr/sdt/include:/usr/sdt/lib:/usr/sdt/bin"
- echo " Any component that is not found will be returned as an empty string. E.g.,"
- echo " if somehow the header files of the software are not found, it prints"
- echo " :/usr/sdt/lib;/usr/sdt/bin"
- echo "Exit code: 0 if software located; otherwise non-zero"
-}
-
-# locate hdf4 software
-locate_hdf4()
-{
-# this default is the best guess of locating hdf4 software
-swpaths_defaults="/usr/ncsa /usr/sdt /usr/local"
-swpaths=
-
-case "$OSname" in
- SunOS)
- case "$OSrelease" in
- 5.7)
- swpaths="/afs/ncsa/packages/hdf/SunOS_5.7"
- ;;
- *)
- # use default
- ;;
- esac
- ;;
- HP-UX)
- case "$OSrelease" in
- B.11.00)
- swpaths="/afs/ncsa/packages/hdf/HPUX_11.00"
- ;;
- *)
- # use default
- ;;
- esac
- ;;
- Linux)
- swpaths="/afs/ncsa/packages/hdf/Linux"
- ;;
- OSF1)
- swpaths="/afs/ncsa/packages/hdf/OSF1_V4.0"
- ;;
- *)
- # just use the defaults
- ;;
-esac
-
-# Check if the hdf4 software is actually available.
-# Accept the directory only if needed .h, .a and tools are found
-# in the same place. That way, they are more likely to be of the
-# same version.
-#
-swpaths="$swpaths $swpaths_defaults"
-for sw in $swpaths; do
- if [ -r $sw/include/hdf.h -a -r $sw/lib/libdf.a -a -r $sw/bin/hdp ]; then
- SW_inc=$sw/include
- SW_lib=$sw/lib
- SW_bin=$sw/bin
- SW_Location=$sw
- break
- fi
-done
-}
-
-# locate hdf5 software
-locate_hdf5()
-{
-# this default is the best guess of locating hdf5 software
-swpaths_defaults="/usr/ncsa /usr/sdt /usr/local"
-swpaths=
-
-case "$OSname" in
- SunOS)
- case "$OSrelease" in
- 5.7)
- swpaths="/afs/ncsa/packages/hdf5/SunOS_5.7"
- ;;
- *)
- # use default
- ;;
- esac
- ;;
- HP-UX)
- case "$OSrelease" in
- B.11.00)
- swpaths="/afs/ncsa/packages/hdf5/HPUX_11.00"
- ;;
- *)
- # use default
- ;;
- esac
- ;;
- Linux)
- swpaths="/afs/ncsa/packages/hdf5/Linux"
- ;;
- FreeBSD)
- swpaths="/afs/ncsa/packages/hdf5/FreeBSD"
- ;;
- OSF1)
- swpaths="/afs/ncsa/packages/hdf5/OSF1_V4.0"
- ;;
- *)
- # just use the defaults
- ;;
-esac
-
-# Check if the hdf5 software is actually available.
-# Accept the directory only if needed .h, .a and tools are found
-# in the same place. That way, they are more likely to be of the
-# same version.
-#
-swpaths="$swpaths $swpaths_defaults"
-for sw in $swpaths; do
- if [ -r $sw/include/hdf5.h -a -r $sw/lib/libhdf5.a -a -r $sw/bin/h5dump ]; then
- SW_inc=$sw/include
- SW_lib=$sw/lib
- SW_bin=$sw/bin
- SW_Location=$sw
- break
- fi
-done
-}
-
-# locate zlib software
-locate_zlib()
-{
-# this default is the best guess of locating zlib software
-swpaths_defaults="/usr /usr/local /usr/ncsa /usr/sdt"
-swpaths=
-
-
-# Check if the zlib software is actually available.
-# Accept the directory only if needed .h, .a and tools are found
-# in the same place. That way, they are more likely to be of the
-# same version.
-# Don't know something specific to check the bin directory. Maybe gzip?
-# Just make sure it exists.
-#
-swpaths="$swpaths $swpaths_defaults"
-for sw in $swpaths; do
- if [ -r $sw/include/zlib.h -a \
- \( -r $sw/lib/libz.a -o -r $sw/lib/libz.so \) -a -d $cw/bin ]; then
- SW_inc=$sw/include
- SW_lib=$sw/lib
- SW_bin=$sw/bin
- SW_Location=$sw
- break
- fi
-done
-
-# if none found, try HDF4 software which contains a version of zlib.
-if [ x-$SW_Location = x- ]; then
- locate_hdf4
-fi
-
-}
-
-# Main
-#
-# Options
-#
-if [ $# -lt 1 ]; then
- USAGE
- exit 1
-fi
-
-if [ "$1" = -h ]; then
- USAGE
- exit 0
-fi
-
-SW=$1
-shift
-
-# locations of the software seeked.
-SW_inc= # include place
-SW_lib= # library place
-SW_bin= # binary place
-SW_Location= # parent directory of all the above
-
-OSname=`uname -s`
-OSrelease=`uname -r`
-
-case $SW in
-hdf4|hdf)
- locate_hdf4
- ;;
-hdf5)
- locate_hdf5
- ;;
-zlib)
- locate_zlib
- ;;
-*)
- echo "unknown software ($SW)"
- USAGE
- exit 1
- ;;
-esac
-
-# show the results located, separated by commas.
-if [ -n "${SW_inc}" -a -n "${SW_lib}" -a -n "${SW_bin}" ]; then
- echo ${SW_inc},${SW_lib},${SW_bin}
- exit 0
-else
- exit 1
-fi
diff --git a/bin/release b/bin/release
index e40c3d3..84555b6 100755
--- a/bin/release
+++ b/bin/release
@@ -13,24 +13,6 @@
#
# Make a release of hdf5.
-#
-# Programmer: Robb Matzke
-# Creation date: on or before 1998-01-29.
-#
-# Modifications
-# Robb Matzke, 1999-07-16
-# The SunOS 5.6 sed *must* have slashes as delimiters. I changed things like
-# `sed s+/CVS++' to `sed 's/\/CVS//'
-#
-# Albert Cheng, 1999-10-26
-# Moved the MANIFEST checking to a separate command file so that
-# it can be invoked individually.
-#
-# Albert Cheng, 2004-08-14
-# Added the --private option.
-#
-# James Laird, 2005-09-07
-# Added the md5 method.
# Function definitions
#
@@ -38,14 +20,13 @@
USAGE()
{
cat << EOF
-Usage: $0 -d <dir> [--docver BRANCHNAME] [-h] [--nocheck] [--private] <methods> ...
- -d DIR The name of the directory where the release(es) should be
+Usage: $0 -d <dir> [--docver BRANCHNAME] [-h] [--private] <methods> ...
+ -d DIR The name of the directory where the release(s) should be
placed.
--docver BRANCHNAME This is added for 1.8 and beyond to get the correct
version of documentation files from the hdf5docs
repository. BRANCHNAME for v1.8 should be hdf5_1_8.
-h print the help page.
- --nocheck Ignore errors in MANIFEST file.
--private Make a private release with today's date in version information.
This must be run at the top level of the source directory.
@@ -100,11 +81,6 @@ EOF
# Function name: tar2zip
# Convert the release tarball to a Windows zipball.
#
-# Programmer: Albert Cheng
-# Creation date: 2014-04-23
-#
-# Modifications
-#
# Steps:
# 1. untar the tarball in a temporary directory;
# Note: do this in a temporary directory to avoid changing
@@ -167,11 +143,6 @@ tar2zip()
# Function name: tar2cmakezip
# Convert the release tarball to a Windows zipball with files to run CMake build.
#
-# Programmer: Larry Knox
-# Creation date: 2017-02-20
-#
-# Modifications
-#
# Steps:
# 1. untar the tarball in a temporary directory;
# Note: do this in a temporary directory to avoid changing
@@ -269,10 +240,6 @@ tar2cmakezip()
# Function name: tar2cmaketgz
# Convert the release tarball to a gzipped tar file with files to run CMake build.
#
-# Programmer: Larry Knox
-# Creation date: 2017-02-20
-#
-# Modifications
#
# Steps:
# 1. untar the tarball in a temporary directory;
@@ -347,11 +314,6 @@ tar2cmaketgz()
# and HDF5options.cmake files for parallel or serial only builds where build
# tests are run on compute nodes using batch scripts.
#
-# Programmer: Larry Knox
-# Creation date: 2019-01-28
-#
-# Modifications
-#
# Steps:
# 1. untar the tarball in a temporary directory;
# Note: do this in a temporary directory to avoid changing
@@ -443,7 +405,6 @@ VERS=`perl bin/h5vers`
VERS_OLD=
test "$VERS" || exit 1
verbose=yes
-check=yes
release_date=`date +%F`
today=`date +%Y%m%d`
pmode='no'
@@ -482,9 +443,6 @@ while [ -n "$1" ]; do
DEST=$1
shift
;;
- --nocheck)
- check=no
- ;;
-h)
USAGE
exit 0
@@ -546,35 +504,17 @@ if [ ! -d $DEST ]; then
exit 1
fi
-# Check the validity of the MANIFEST file.
-bin/chkmanifest || fail=yes
-if [ "X$fail" = "Xyes" ]; then
- if [ $check = yes ]; then
- echo ""
- echo "Note! If you are running bin/release in a development branch"
- echo "later than v 1.8 the MANIFEST check is expected to fail when"
- echo "autogen.sh has not been run successfully. Either run autogen.sh "
- echo "with /usr/hdf/bin/AUTOTOOLS at the beginning of PATH or add the"
- echo "--nocheck argument to the bin/release command."
- exit 1
- else
- echo "Continuing anyway..."
- fi
-fi
-
-# Create a manifest that contains only files for distribution.
-MANIFEST=$tmpdir/H5_MANIFEST
-grep '^\.' MANIFEST | grep -v _DO_NOT_DISTRIBUTE_ >$MANIFEST
-
-# Prepare the source tree for a release.
+# Create a symlink to the source so files in the tarball have the prefix
+# we want (gnu's --transform isn't portable)
ln -s `pwd` $tmpdir/$HDF5_VERS || exit 1
+
# Save a backup copy of Makefile if exists.
test -f Makefile && mv Makefile $tmpdir/Makefile.x
cp -p Makefile.dist Makefile
-# Update README.txt and release_docs/RELEASE.txt with release information in
+# Update README.md and release_docs/RELEASE.txt with release information in
# line 1.
-for f in README.txt release_docs/RELEASE.txt; do
+for f in README.md release_docs/RELEASE.txt; do
echo "HDF5 version $VERS released on $release_date" >$f.x
sed -e 1d $f >>$f.x
mv $f.x $f
@@ -582,18 +522,14 @@ for f in README.txt release_docs/RELEASE.txt; do
chmod 644 $f
done
-# trunk is different than branches.
+# develop is different than branches.
if [ "${DOCVERSION}" ]; then
DOC_URL="$DOC_URL -b ${DOCVERSION}"
fi
# Create the tar file
test "$verbose" && echo " Running tar..." 1>&2
-( \
- cd $tmpdir; \
- tar cf $HDF5_VERS.tar $HDF5_VERS/Makefile \
- `sed 's/^\.\//hdf5-'$VERS'\//' $MANIFEST` || exit 1 \
-)
+(cd "$tmpdir" && exec tar -ch --exclude-vcs -f "$HDF5_VERS.tar" "./$HDF5_VERS" || exit 1 )
# Compress
MD5file=$HDF5_VERS.md5
@@ -680,4 +616,6 @@ fi
# Remove temporary things
rm -rf $tmpdir
+echo "DONE"
+
exit 0
diff --git a/bin/restore.sh b/bin/restore.sh
index 47dde11..0597572 100755
--- a/bin/restore.sh
+++ b/bin/restore.sh
@@ -48,6 +48,9 @@ rm -f bin/missing
rm -f bin/test-driver
rm -f bin/depcomp
+echo "Remove files generated by autoheader"
+rm -f src/H5config.h.in
+
echo "Remove files generated by bin/make_err"
rm -f src/H5Epubgen.h
rm -f src/H5Einit.h
diff --git a/bin/runtest b/bin/runtest
deleted file mode 100755
index 5e05abb..0000000
--- a/bin/runtest
+++ /dev/null
@@ -1,966 +0,0 @@
-#! /bin/sh
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-
-# run the hdf5/bin/snapshot
-# Usage:
-# runtest run the test for the local host
-# runtest <hostname> run the test for <hostname>
-# runtest -all run the test for all predefined hosts
-#
-# Assumptions in knowing where to find the right scripts to execute.
-# 1. assume we are at the top level of the hdf5 source. So, bin/* are
-# where the script files are.
-# 2. after the cvs update is completed, we can go to the snapshot area
-# hdf5 source tree and use the bin/* there.
-# 3. Cannot use the snapshot area scripts from the beginning because
-# for one, the current directory is renamed as previous right after
-# a snapshot release; and for another, some scripts may be changed
-# by the cvs update while it is being used.
-
-# local setup
-DEBUGMODE=""
-test -n "$DEBUGMODE" && echo "******** DEBUGMODE is $DEBUGMODE ************"
-WHEREAMI='pwd'
-CMD=
-
-# the name of this program
-PROGNAME="bin/runtest $DEBUGMODE"
-
-# Setup
-HOSTNAME=`hostname | cut -f1 -d.` # no domain part
-TODAY=`date +%m%d%a`
-WEEKDAY=`date +%a`
-H5VER= # default to current CVS version
-H5VERSION= # default to current CVS version
-n_test=0 # Number of tests ran
-n_pass=0 # Number of tests passed
-n_fail=0 # Number of tests failed
-n_skip=0 # Number of tests skipped
-
-# Default to do checkout (only once) and test, no release.
-# Will run test only if there is significant differences from previous version.
-# If srcdir is not used, don't launched multiple tests
-SNAPSHOT="${DEBUGMODE:+echo }bin/snapshot"
-SRCDIR="srcdir"
-# Default standard Snaptest commands
-SNAPCMD="$SRCDIR test clean"
-# Default Standard snaptest command options
-STANDARD_OPT=""
-ENABLE_PARALLEL="--enable-parallel"
-CONFIGNAME=$HOSTNAME # Name used in the SNAPTESTCFG file
-
-# test host default as local host.
-TESTHOST=""
-
-#################################
-# Function definitions
-#################################
-
-# Print messages to stdout
-# Use this to show output heading to stdout
-PRINT()
-{
- echo "$*"
-}
-
-# Show seconds since midnight.
-# This is used to calculate seconds elapsed
-SecOfDay()
-{
- set `date '+%H %M %S'`
- t_sec=`expr $1 \* 3600 + $2 \* 60 + $3`
- echo $t_sec
-}
-
-# Calculated the elapsed time (in seconds) between the first
-# and second time. If second time is smaller than the first,
-# we assume the clock has passed midnight and calculate appropriately.
-ElapsedTime()
-{
- if [ $2 -lt $1 ]; then
- t_sec=`expr 3600 \* 24 - $1 + $2`
- else
- t_sec=`expr $2 - $1`
- fi
- echo `expr $t_sec / 60`m `expr $t_sec % 60`s
-}
-
-# Report errors
-# $1--an error message to be printed
-REPORT_ERR()
-{
- ERRMSG=$1
- # print it with a banner shifted right a bit
- PRINT " *************************************"
- PRINT " `date`"
- PRINT " $ERRMSG"
- PRINT " *************************************"
- # report it in the FAILED-LOG file too
- PRINT "$ERRMSG" >> $FAILEDLOG
-}
-
-#
-# Report results of the last test done
-REPORT_RESULT()
-{
- if [ $retcode -eq 0 ]; then
- if [ $skiptest = yes ]; then
- n_skip=`expr $n_skip + 1`
- PRINT "SKIPPED ${HOSTNAME}: $TEST_TYPE" | tee -a $SKIPPEDLOG
- else
- n_pass=`expr $n_pass + 1`
- PRINT "PASSED ${HOSTNAME}: $TEST_TYPE" | tee -a $PASSEDLOG
- fi
- else
- # test failed.
- n_fail=`expr $n_fail + 1`
- REPORT_ERR "****FAILED ${HOSTNAME}: $TEST_TYPE****"
- fi
-}
-
-# Print a blank line
-PRINT_BLANK()
-{
- PRINT
-}
-
-# Print test trailer
-PRINT_TEST_TRAILER()
-{
- PRINT "*** finished $TEST_TYPE tests for $HOSTNAME ***"
- date; EndTime=`SecOfDay`
- PRINT Total time = `ElapsedTime $StartTime $EndTime`
- PRINT_BLANK
-}
-
-# Print trailer summary
-PRINT_TRAILER()
-{
- PRINT "*** finished tests in $HOSTNAME ***"
- date; TotalEndTime=`SecOfDay`
- PRINT "${HOSTNAME}: Ran $n_test($n_pass/$n_fail/$n_skip) $runtest_type, Grand total test time = " \
- "`ElapsedTime $TotalStartTime $TotalEndTime`" | tee -a $TIMELOG
- PRINT_BLANK
-}
-
-# Figure out which remote command to use to reach a host.
-# Try ssh first, then rsh since fewer machines support rsh exec.
-# $1--hostname to reach.
-CHECK_RSH()
-{
- # Figure out how to use ping command in this host.
- # Some hosts use "ping host count", some use "ping -c count host".
- # Test "ping -c 3 -w 5" since it has timeout feature.
- # Test "ping -c ..." style before "ping host 3" because some machines
- # that recognize -c treat 'ping localhost 3' as to ping host '3'.
- if [ -z "$PING" ]; then
- if ping -c 3 -w 5 localhost >/dev/null 2>&1; then
- PING='ping -c 3 -w 5'
- PINGCOUNT=
- elif ping -c 3 localhost >/dev/null 2>&1; then
- PING='ping -c 3'
- PINGCOUNT=
- elif ping localhost 3 >/dev/null 2>&1; then
- PING=ping
- PINGCOUNT=3
- else # don't know how to use ping.
- PING=no_ping
- PINGCOUNT=
- fi
- fi
- #
- host=$1
- # Try remote command with host if it responds to ping.
- # Still try it if we don't know how to do ping.
- if [ no_ping = "$PING" ] || $PING $host $PINGCOUNT >/dev/null 2>&1; then
- if ssh $host -n hostname >/dev/null 2>&1; then
- RSH=ssh
- elif rsh $host -n hostname >/dev/null 2>&1; then
- RSH=rsh
- else
- PRINT cannot remote command with $host
- RSH="NoRemoteCommand"
- fi
- else
- RSH="NotReachable"
- fi
-}
-
-
-# Wait for a file for at most number of minutes
-# $1--the file
-# $2--number of minutes
-# WAIT_STATUS set to:
-# -1 if errors encountered
-# 0 if file found within time limit
-# 1 if file not found within time limit
-WAITFOR()
-{
- wait_file=$1
- nminutes=$2
- if [ -z "$wait_file" -o ! "$nminutes" -ge 0 ]
- then
- PRINT "errors in argument of WAITFOR(): wait_file($1) or nminutes($2)"
- WAIT_STATUS=-1
- return
- fi
- while [ ! -f $wait_file ]; do
- if [ $nminutes -gt 0 ]; then
- PRINT "Wait For $wait_file to appear"
- sleep 60 #sleep 1 minute
- else
- WAIT_STATUS=1
- return
- fi
- nminutes=`expr $nminutes - 1`
- done
- WAIT_STATUS=0
- return
-}
-
-
-# Wait till a file disappears for at most number of minutes.
-# Useful to wait till a lock is removed by another process.
-# $1--the file
-# $2--number of minutes
-# WAIT_STATUS set to:
-# -1 if errors encountered
-# 0 if file disappears within time limit
-# 1 if file has not disappeared within time limit
-WAITTILL()
-{
- wait_file=$1
- nminutes=$2
- if [ -z "$wait_file" -o ! "$nminutes" -ge 0 ]
- then
- PRINT "errors in argument of WAITTILL(): wait_file($1) or nminutes($2)"
- WAIT_STATUS=-1
- return
- fi
- while [ -f $wait_file ]; do
- if [ $nminutes -gt 0 ]; then
- PRINT "Wait till $wait_file has disappeared"
- sleep 60 #sleep 1 minute
- else
- WAIT_STATUS=1
- return
- fi
- nminutes=`expr $nminutes - 1`
- done
- WAIT_STATUS=0
- return
-}
-
-
-# Run one snapshot test
-# $*--Types of test being run
-RUNSNAPTEST()
-{
- SNAPCMD_OPT="$STANDARD_OPT" # snapshot test option
- SRCDIRNAME=${HOSTNAME}
- # restore CC, PATH in case they were changed in the last test.
- CC="$CC_SAVED"
- PATH=$PATH_SAVED
- export PATH # DEC OSF1 needs to export PATH explicitly
- TEST_TYPE=$*
- retcode=0
- skiptest=no
- date
- PRINT "*** starting $TEST_TYPE tests in $HOSTNAME ***"
- PRINT "Uname -a: `uname -a`"
-
- # Parse the test type and set options accordingly.
- # See comments of SNAPTEST_CONFIG_PARSE().
- while [ $# -gt 0 ]; do
- case $1 in
- -n32) # want -n32 option
- SRCDIRNAME=${SRCDIRNAME}-n32
- CC="cc -n32"
- export CC
- ;;
- -64) # want -64 option
- SRCDIRNAME=${SRCDIRNAME}-64
- CC="cc -64"
- export CC
- ;;
- parallel) # want parallel test
- SNAPCMD_OPT="$SNAPCMD_OPT $ENABLE_PARALLEL"
- SRCDIRNAME=${SRCDIRNAME}-pp
- ;;
- standard) # standard test
- ;;
- --*)
- # option for configure
- SNAPCMD_OPT="$SNAPCMD_OPT $1"
- ;;
- op-configure)
- # option for configure
- SNAPCMD_OPT="$SNAPCMD_OPT $1 $2"
- shift
- ;;
- op-snapshot)
- # option for snapshot
- shift
- SNAPCMD_OPT="$SNAPCMD_OPT $1"
- ;;
- setenv)
- # pass them along to snapshot set environment variable
- shift
- SNAPCMD_OPT="$SNAPCMD_OPT setenv $1 $2"
- shift
- ;;
- setenvN)
- # set environment variable with $1 values
- # e.g., setenvN 3 x a b c is same as setenv x="a b c".
- # pass them along to snapshot set environment variable
- shift
- envN=$1
- shift
- envname=$1
- SNAPCMD_OPT="$SNAPCMD_OPT setenvN $envN $envname"
- envalue=
- while test $envN -gt 0; do
- shift
- envalue="$envalue $1"
- envN=`expr $envN - 1`
- done
- SNAPCMD_OPT="$SNAPCMD_OPT $envalue"
- ;;
- skip)
- # skip this test
- skiptest=yes
- ;;
- srcdirname)
- # Use this before using parallel and -n32 since this overrides
- # the others.
- shift
- SRCDIRNAME=$1
- ;;
- deploy)
- # deploy the built binary.
- shift
- SNAPCMD_OPT="$SNAPCMD_OPT deploy $1"
- ;;
- deploydir)
- # default directory for deployment.
- shift
- SNAPCMD_OPT="$SNAPCMD_OPT deploydir $1"
- ;;
- *) # unknown test
- PRINT "$0: unknown type of test ($1)"
- retcode=1
- ;;
- esac
- shift
- done
-
- if [ $retcode -ne 0 -o $skiptest = yes ]; then
- errcode=$retcode
- return $retcode
- fi
-
- # Track down the zlib software
- ans=`$SNAPYARD/current/bin/locate_sw zlib`
- if [ $? = 0 ]; then
- Z_INC=`echo $ans | cut -f1 -d,`
- Z_LIB=`echo $ans | cut -f2 -d,`
- SNAPCMD_OPT="$SNAPCMD_OPT zlib $Z_INC,$Z_LIB"
- else
- # cannot locate zlib software.
- # continue the test, maybe configure can find it.
- :
- fi
-
- if [ -n "${SRCDIRNAME}" ]; then
- SNAPCMD_OPT="$SNAPCMD_OPT srcdirname ${SRCDIRNAME}"
- fi
-
- # Setup log file name to save test output
- THIS_MINUTE=`date +%H%M`
- LOGFILE=${LOGBASENAME}/${SRCDIRNAME}_${TODAY}_${THIS_MINUTE}
- PRINT "Running snapshot with output saved in"
- PRINT " $LOGFILE"
- (date; PRINT Hostname=$HOSTNAME) >> $LOGFILE
-
- (
- cd $SNAPYARD/current
- $SNAPSHOT $SNAPCMD $SNAPCMD_OPT
- ) >> $LOGFILE 2>&1
- retcode=$?
- [ $retcode -ne 0 ] && errcode=$retcode
-
- date >> $LOGFILE
- if [ $retcode -ne 0 ]; then
- # Dump the first 10 lines and the last 30 lines of the LOGFILE.
- ( ntail=30
- echo =========================
- echo "Dumping logfile of ${HOSTNAME}: $TEST_TYPE"
- echo "Last $ntail lines of $LOGFILE"
- echo =========================
- tail -$ntail $LOGFILE
- echo =========================
- echo Dumping done
- echo =========================
- echo ""
- ) >> $FAILEDDETAIL
- fi
-}
-
-TIMELIMIT_PARSE()
-{
- # Function returns timeparam for timekeeper via standard out -
- # any debug statements should be 'echo "Debug string" >&2' or timekeeper
- # will declare timeparam to be non-numeric and ignore it.
- while read x y ; do
- # Scan for entry for this weekday.
- xd=`echo $x | cut -f1 -d/`
- if [ "$xd" = ${WEEKDAY} ]; then
- # strip away the weekday/ part.
- timeparam=`echo $x | cut -f2 -d/`
- break
- fi
- case "$x" in
- '' | '#'*)
- # blank or comment lines. Continue.
- ;;
- ???/*)
- # Ignore any entry not of this weekday.
- ;;
- *)
- timeparam="$x"
- ;;
- esac
- done
- echo $timeparam
- return
-}
-
-# configuration parsing.
-# Taking configuration from input.
-# This should be invoke with configure file as stdin.
-# Syntax of the configure file:
-# All lines started with the # are comment lines and are ignored.
-# Blank lines are ignored too.
-# Each config line starts with a "Scope" followed by test types.
-#
-# Scope can be:
-# standard ... # what the standard test types are.
-# <host>: <test> Do <test> for <host>
-# all: <test> Do <test> for all hosts.
-# <weekday>/... Use this scope if the <weekday> matches.
-# <weekday> can be {Mon,Tue,Wed,Thu,Fri,Sat,Sun}
-# If no <host>: input for a <host>, the standard test is used.
-#
-# Test types:
-# standard tests defined in standard scope.
-# -n32 -n32 mode. Apply to 64/32 bit OS such as IRIX64.
-# parallel parallel mode.
-# op-configure <option> configure option
-# op-snapshot <option> snapshot option
-# --* configure option
-# setenv <name> <value> set environment variable <name> to <value>
-# Pass along to snapshot
-# setenvN <N> <name> <value> ...
-# set environment variable with <N> values
-# e.g., setenvN 3 x a b c is same as setenv x="a b c".
-# Pass along to snapshot.
-# skip skip this test
-# srcdirname <name> use <name> as the build-directory.
-# deploy <name> deploy the built binary at directory <name>.
-# deploydir <name> use <name> as the default directory for deployment.
-SNAPTEST_CONFIG_PARSE()
-{
- while read x y ; do
- # Scan for entry for this weekday.
- xd=`echo $x | cut -f1 -d/`
- if [ "$xd" = ${WEEKDAY} ]; then
- # strip away the weekday/ part.
- x=`echo $x | cut -f2 -d/`
- fi
- case "$x" in
- '' | '#'*)
- # blank or comment lines. Continue.
- ;;
- ???/*)
- # Ignore any entry not of this weekday.
- ;;
- standard)
- #standard configuration
- STANDARD_OPT="$y"
- ;;
- all: | ${CONFIGNAME}:)
- # types of test for all hosts or this host
- if [ -n "$TEST_TYPES" ]; then
- TEST_TYPES="$TEST_TYPES ; $y"
- else
- TEST_TYPES="$y"
- fi
- ;;
- *:) # ignore types of test for other hosts
- ;;
- *) # unknown configuration option
- PRINT $x $y
- PRINT "***Unknown configuration option. Ignored.***"
- ;;
- esac
- done
-}
-
-# Snap Test configuration parsing.
-# If TEST_TYPES is not set, set it to do the "standard" test.
-SNAPTEST_CONFIG()
-{
- TEST_TYPES=
- STANDARD_OPT=
- if [ -f $SNAPTESTCFG ]; then
- SNAPTEST_CONFIG_PARSE < $SNAPTESTCFG
- fi
- TEST_TYPES=${TEST_TYPES:-'standard'}
-}
-
-
-# Show usage page
-USAGE()
-{
-cat <<EOF
-Usage: runtest [-h] [-debug] [-r<version>] [-all] [-nocvs] [-nodiff] [<host> ...]
- -h
- print this help page
- -debug
- turn on debug mode
- -r<version>
- do runtest for <version>
- -all
- launch tests for all pre-defined testing hosts
- -nocvs
- do not do cvs commands
- -nodiff
- do not do diff commands
- -setup
- setup the directory structure for snapshot test
- -configname <name>
- use <name> as hostname in the parsing of the snaptest configure file
- <host>
- launch tests for <host>
-
--all and <host> are contradictory and whichever is specified last, is
-the one to take effect. If neither are given, do the test for the
-local host.
-EOF
-}
-
-
-# Verify if directory ($1) exists. If not, create it.
-CHECK_DIR()
-{
- dir=$1
- if test ! -e $1; then
- echo mkdir $1
- mkdir $1
- errcode=$?
- elif test ! -d $1; then
- echo $1 is not a directory
- errcode=1
- fi
-}
-
-
-#################################
-# Main
-#################################
-#################################
-# Set up global variables
-#################################
-retcode=0 # error code of individula task
-errcode=0 # error code of the whole test
-skiptest=no # if test is skipped
-CC_SAVED="$CC" # CC & PATH maybe changed within a test.
-PATH_SAVED=$PATH # These save the original values.
-timelimit=300 # default time limit (minutes) for the timekeeper
-
-#################################
-# Parse options
-#################################
-while [ $# -gt 0 ]; do
- case "$1" in
- -h) # help--show usage
- USAGE
- exit 0
- ;;
- -debug*)
- # set debug mode
- DEBUGMODE="$1"
- SNAPSHOT="echo bin/snapshot"
- PROGNAME="$PROGNAME $DEBUGMODE"
- PRINT "******** DEBUGMODE is $DEBUGMODE ************"
- ;;
- -r*)
- # version string
- H5VER="$1"
- ;;
- -all)
- # Test all hosts.
- TESTHOST=-all
- ;;
- -nocvs)
- # do not do cvs commands.
- NOCVS=nocvs
- ;;
- -nodiff)
- # do not do diff commands.
- NODIFF=nodiff
- ;;
- -configname)
- # use <name> as hostname in the parsing of the snaptest configure file.
- shift
- CONFIGNAME=$1
- ;;
- -setup)
- # setup the directory structure for snapshot test.
- CMD=setup
- ;;
- -*) # Unknown option
- PRINT "Unknown option ($1)"
- USAGE
- exit 1
- ;;
- *)
- TESTHOST=$*
- break
- ;;
- esac
- shift
-done
-
-# setup H5VER if not set yet
-if [ -z "$H5VER" -a -f bin/snapshot_version ]
-then
- . bin/snapshot_version
-fi
-
-if [ -n "$H5VER" ]
-then
- H5VERSION=hdf5_`echo $H5VER | sed -e s/-r// -e s/\\\./_/g`
- PROGNAME="$PROGNAME $H5VER"
-else
- H5VERSION=hdf5
-fi
-
-#################################
-# Setup snapshot test directories
-#################################
-BASEDIR=${HOME}/snapshots-${H5VERSION}
-# initial processing of setup option if requested
-if test x-$CMD = x-setup; then
- CHECK_DIR $BASEDIR
- test $errcode -ne 0 && exit 1
-elif [ ! -d ${BASEDIR} ]; then
- echo "BASEDIR ($BASEDIR) does not exist"
- exit 1
-fi
-# Show the real physical path rather than the symbolic path
-SNAPYARD=`cd $BASEDIR && /bin/pwd`
-# Log file basename
-LOGDIR=${SNAPYARD}/log
-LOGBASENAME=${LOGDIR}
-PASSEDLOG=${LOGDIR}/PASSED_LOG_${TODAY}
-FAILEDLOG=${LOGDIR}/FAILED_LOG_${TODAY}
-FAILEDDETAIL=${LOGDIR}/FAILED_DETAIL_${TODAY}
-SKIPPEDLOG=${LOGDIR}/SKIPPED_LOG_${TODAY}
-TIMELOG=${LOGDIR}/TIME_LOG_${TODAY}
-TIMEKEEPERLOG=${LOGDIR}/TIMEKEEPER_LOG_${TODAY}
-CVSLOG=${LOGDIR}/CVS_LOG_${TODAY}
-CVSLOG_LOCK=${LOGDIR}/CVS_LOG_LOCK_${TODAY}
-DIFFLOG=${LOGDIR}/DIFF_LOG_${TODAY}
-COPYRIGHT_ERR=${LOGDIR}/COPYRIGHT_ERR_${TODAY}
-# Snap Test hosts and Configuration files
-ALLHOSTSFILE=${SNAPYARD}/allhostfile
-SNAPTESTCFG=${SNAPYARD}/snaptest.cfg
-TIMELIMIT=${SNAPYARD}/timelimit
-TMPFILE="${LOGDIR}/#runtest.${TODAY}.$$"
-
-# more processing of setup option if requested
-if test x-$CMD = x-setup; then
- CHECK_DIR $LOGDIR
- test $errcode -ne 0 && exit 1
- CHECK_DIR $LOGDIR/OLD
- test $errcode -ne 0 && exit 1
- CHECK_DIR $SNAPYARD/TestDir
- test $errcode -ne 0 && exit 1
- # create empty test hosts or configure files if non-existing
- for f in $ALLHOSTSFILE $SNAPTESTCFG; do
- if test ! -f $f; then
- echo Creating $f
- touch $f
- fi
- done
- # create or update the current source.
- echo update current source
- $SNAPSHOT checkout
- # setup completed. Exit.
- exit 0
-fi
-
-#################################
-# Show some host status numbers
-#################################
-# df sometimes hangs due to file system problems. Invoke it as background
-# process and give it 10 seconds to finish. If it hangs, just continue.
-uptime
-df &
-sleep 10
-
-#################################
-# Setup test host(s)
-#################################
-if [ "$TESTHOST" = -all ]; then
- if [ -f $ALLHOSTSFILE ]; then
- TESTHOST=`sed -e '/^#/d;/^ *$/d' $ALLHOSTSFILE`
- else
- PRINT "could not access the all-hosts-file ($ALLHOSTSFILE)"
- USAGE
- exit 1
- fi
-fi
-
-
-#################################
-# Setup to print a trailer summary when exiting not via
-# the normal end of the script.
-#################################
-trap PRINT_TRAILER 0
-
-#
-TotalStartTime=`SecOfDay`
-
-# Process the configuration
-SNAPTEST_CONFIG
-PRINT STANDARD_OPT=$STANDARD_OPT
-PRINT TEST_TYPES=$TEST_TYPES
-PRINT_BLANK
-
-# Do a checkout if one has not been done today.
-# Then check MANIFEST file and copyrights noitces.
-if [ -z "$NOCVS" ]; then
- PRINT "Running CVS checkout with output saved in"
- PRINT " $CVSLOG"
- # Set CVS lock first
- touch $CVSLOG_LOCK
- ($SNAPSHOT checkout ) >> $CVSLOG 2>&1
- # Save error code and remove the lock
- errcode=$?
- rm -f $CVSLOG_LOCK
- if [ $errcode -ne 0 ]; then
- # test failed.
- REPORT_ERR "****FAILED ${HOSTNAME}: CVS checkout****"
- exit $errcode
- fi
- # ===================
- # Check MANIFEST file
- # ===================
- PRINT Checking MAINFEST file ...
- (cd $SNAPYARD/current; bin/chkmanifest) > $TMPFILE 2>&1
- errcode=$?
- if [ $errcode -eq 0 ]; then
- # test passed.
- cat $TMPFILE
- else
- # test failed.
- REPORT_ERR "****FAILED ${HOSTNAME}: MANIFEST check****"
- ( echo =========================
- echo "MANIFEST checking failed output"
- echo =========================
- cat $TMPFILE
- echo =========================
- echo "MANIFEST checking failed output done"
- echo =========================
- echo ""
- ) >> $FAILEDDETAIL
- fi
- rm $TMPFILE
- PRINT_BLANK
- # No copyright checking until what need copyright is decided. 2006/4/7.
- if false; then
- # ======================
- # Check Copyright notice
- # ======================
- PRINT Checking Copyrights notices ...
- if (cd $SNAPYARD/current; bin/chkcopyright) > $TMPFILE 2>&1 ; then
- echo Passed.
- else
- # Save the output and report some of it.
- # Do not report it as failed for runtest yet.
- # Send a separate report mail via hardcoding.
- # Need fixes/cleanup later.
- echo "Failed. See detail in another report mail"
- cp $TMPFILE $COPYRIGHT_ERR
- nheadlines=300
- ntaillines=5 # Number of lines in report summary.
- (
- echo =========================
- echo "Copyright checking failed. Showing first $nheadlines lines of output."
- echo "Complete output is in file $COPYRIGHT_ERR"
- echo =========================
- nreportlines=`wc -l < $COPYRIGHT_ERR`
- if [ $nreportlines -le `expr $nheadlines + $ntaillines` ]; then
- # Just print the whole file.
- cat $COPYRIGHT_ERR
- else
- # Show the first $nheadlines plus report summary
- head -$nheadlines $COPYRIGHT_ERR
- echo ...
- tail -$ntaillines $COPYRIGHT_ERR
- fi
- ) | Mail -s "${H5VERSION} Copyrights check Failed" hdf5lib
- fi
- rm $TMPFILE
- PRINT_BLANK
- fi
-else
- # make sure the cvs update, if done by another host, has completed.
- # First wait for the presence of $CVSLOG which signals some host
- # has started the cvs update. Then wait for the absence of $CVSLOG_LOCK
- # which signals the host has completed the cvs update.
- WAITFOR $CVSLOG 90
- if [ $WAIT_STATUS -ne 0 ]; then
- errcode=$WAIT_STATUS
- REPORT_ERR "****FAILED ${HOSTNAME}: Time expired waiting CVS update to start****"
- exit $errcode
- fi
- WAITTILL $CVSLOG_LOCK 10
- if [ $WAIT_STATUS -ne 0 ]; then
- errcode=$WAIT_STATUS
- REPORT_ERR "****FAILED ${HOSTNAME}: Time expired waiting CVS update to finish****"
- exit $errcode
- fi
-fi
-
-# run a snapshot diff to see if any significant differences between
-# the current and previous versions
-if [ -z "$NODIFF" ]; then
- $SNAPSHOT diff >> $DIFFLOG 2>&1
- errcode=$?
- # check the errcode only if NOT in DEBUG MODE
- if [ -z "$DEBUGMODE" -a $errcode -eq 0 ]; then
- # no need to run test
- PRINT "NO TEST: no significant differences between current and previous versions" |
- tee -a $PASSEDLOG
- exit 0
- fi
-fi
-
-# we can use the version of script in SNAPYARD/current now.
-# Don't do the diff or cvs update any more.
-PROGNAME="$SNAPYARD/current/$PROGNAME -nodiff -nocvs"
-
-# Decide to do test for the local host or for remote hosts
-if [ -n "$TESTHOST" -a $HOSTNAME != "$TESTHOST" ]; then
- date
- PRINT "*** launching tests from $HOSTNAME ***"
- PRINT_BLANK
- TEST_TYPE="launching"
- cd ${SNAPYARD}/log
- # Fork off timekeeper if concurrent tests will be used.
- if [ -n "$SRCDIR" ]; then
- timelimit=`TIMELIMIT_PARSE < $TIMELIMIT`
- ($SNAPYARD/current/bin/timekeeper $timelimit > $TIMEKEEPERLOG 2>&1 &)
- PRINT " Fork off timekeeper $timelimit"
- fi
- runtest_type="hosts"
- for h in $TESTHOST; do
- # Must do CONFIGNAME before $h got changed by the second cut.
- # cut returns the whole string if there is no / in the string
- # at all. But that works okay for the CONFIGNAME too.
- CONFIGNAME=`echo $h | cut -f2 -d/`
- h=`echo $h | cut -f1 -d/`
- n_test=`expr $n_test + 1`
- TMP_OUTPUT="#${h}_${CONFIGNAME}.out"
- (PRINT "=============="
- PRINT "Testing $h"
- PRINT "==============") > $TMP_OUTPUT
- CHECK_RSH $h
- # run the remote shell command with output to $TMP_OUTPUT
- case "$RSH" in
- rsh|ssh)
- CMD="$RSH $h -n $PROGNAME -configname $CONFIGNAME"
- PRINT $CMD
-
- # launch concurrent tests only if srcdir is used
- if [ -n "$SRCDIR" ]; then
- $CMD || REPORT_ERR "****FAILED ${h}: Abnormal exit from runtest****" && PRINT_BLANK &
- echo $! > PID.${h}_${CONFIGNAME}
- else
- $CMD || REPORT_ERR "****FAILED ${h}: Abnormal exit from runtest****" && PRINT_BLANK
- fi
- ;;
- NoRemoteCommand)
- PRINT $h does not accept Remote Command "(`date`)"
- ;;
- NotReachable)
- PRINT $h is not reachable "(`date`)"
- ;;
- *)
- PRINT "CHECK_RSH for $h returned unknown result ($RSH)"
- ;;
- esac >> $TMP_OUTPUT 2>&1
- done
- # wait for all launched tests to finish, then cat them back out.
- wait
- # Pause a moment in case the timekeeper is terminating processes.
- wait 30
- for h in $TESTHOST; do
- CONFIGNAME=`echo $h | cut -f2 -d/`
- h=`echo $h | cut -f1 -d/`
- TMP_OUTPUT="#${h}_${CONFIGNAME}.out"
- cat $TMP_OUTPUT
- # Verify test script did complete by checking the last lines
- (tail -5 $TMP_OUTPUT | grep -s 'Grand total' > /dev/null 2>&1) ||
- (REPORT_ERR "****FAILED ${h}: snaptest did not complete****" &&
- PRINT_BLANK)
- rm -f $TMP_OUTPUT PID.${h}_${CONFIGNAME}
- done
- exit 0
-fi
-
-# run the test(s)
-# Note that first field is cut without -s but all subsequent cut
-# must use -s. If -s is not used at all, a $TEST_TYPES that has
-# no ';' (only 1 test), will pass through intact in all cut. That
-# results in infinite looping.
-# If -s is used with the first field, it will suppress completely
-# a $TYPE_TYPES that has no ';' (only 1 tst ). That results in no
-# test at all.
-# Note that n_test must start as 1.
-#
-n_test=1
-runtest_type="tests"
-TEST="`echo $TEST_TYPES | cut -f$n_test -d';'`"
-while [ -n "$TEST" ]; do
- StartTime=`SecOfDay`
- RUNSNAPTEST $TEST
- REPORT_RESULT
- PRINT_TEST_TRAILER
-
- n_test=`expr $n_test + 1`
- TEST="`echo $TEST_TYPES | cut -f$n_test -s -d';'`"
-done
-# dec n_test to show the actual number of tests ran.
-n_test=`expr $n_test - 1`
-
-PRINT_TRAILER
-
-# disable trailer summary printing since all trailers have been
-# printed and we are exiting normally.
-trap 0
-exit $errcode
diff --git a/bin/snapshot b/bin/snapshot
deleted file mode 100755
index 7fcf3ab..0000000
--- a/bin/snapshot
+++ /dev/null
@@ -1,837 +0,0 @@
-#!/bin/sh
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-
-# This script should be run nightly from cron. It checks out the source
-# from the source repository and compares it against the previous
-# snapshot. If anything significant changed then a new snapshot is
-# created, the minor version number is incremented, and the change is
-# checked back into the source repository.
-#
-
-
-# function definitions
-TIMESTAMP()
-{
- echo "=====" "$1": "`date`" "====="
-}
-
-EXIT_BANNER()
-{
- TIMESTAMP "Exit $PROGNAME with status=$?"
-}
-
-# Show current total disk usage.
-DISKUSAGE()
-{
- du -ks | \
- ( read x y; echo "Disk Usage=$x KB" )
-}
-
-# function provided for testing software downloaded as tar files. A version of
-# this function that properly extracts the downloaded files can be provided in
-# the snapshots-${sw}-overrides file.
-EXTRACT()
-{
- echo "Error: ${SWVERSION} is in source repository - does not need extraction."
-}
-
-# Standard procedure for checking out or updating source code from an hdfgroup
-# git repository. Override the function for other repositories or procedures.
-SOURCE_CHECKOUT()
-{
- if test -n $GIT_URL; then
- if [ -n "$AUTOGEN" ]; then
- echo "Creating fresh clone of $GIT_URL in $BASEDIR/current_src"
- # Check out the current version from source repository.
- (cd $BASEDIR; rm -rf current_src
- if test -z $GIT_BRANCH; then
- echo "Testing empty branch $GIT_BRANCH."
- git clone $GIT_URL current_src
- else
- echo "Testing branch $GIT_BRANCH."
- git clone $GIT_URL -b $GIT_BRANCH current_src
- fi
- ) || exit 1
- else
- echo "Creating fresh clone of $GIT_URL in $BASEDIR/current"
- # Check out the current version from source repository.
- (cd $BASEDIR; rm -rf current
- if test -n $GIT_BRANCH; then
- git clone $GIT_URL -b $GIT_BRANCH current
- else
- git clone $GIT_URL current
- fi ) || exit 1
- fi
- else
- echo "Warning! Source directory ("current") is not checked out from git."
- fi
-}
-
-# Standard procedure for running the configure command in a build (test)
-# directory
-RUNCONFIGURE()
-{
- if [ "${CURRENT}" != "${TESTDIR}" -a "$CPSRC" = "yes" ]; then
- echo "Copying source files to ${TESTDIR}."
- cp -pr ${CURRENT}/* ${TESTDIR}
- cd ${TESTDIR}
- ./${CONFIGURE}
- elif [ -n "${AUTOGEN}" ]; then
- ${CURRENTSRC}/${CONFIGURE}
- else
- ${CURRENT}/${CONFIGURE}
- fi
-}
-
-# Sometimes "make distclean" doesn't adequately remove files from the previous
-# build. If a build (test) directory was used, its contents can be entirely
-# deleted to provide a clean start. If the test is building in the source
-# directory, the contents can't be deleted, so run "make distclean".
-DISTCLEAN()
-{
- if [ "${srcdir}" = "yes" -a -n "${SRCDIRNAME}" -a -d ${BASEDIR}/TestDir/${SRCDIRNAME} ]; then
- echo "Remove contents of $SRCDIRNAME.\n"
- rm -rf ${BASEDIR}/TestDir/${SRCDIRNAME}/*
- else
- echo "$MAKE distclean"
- (cd ${TESTDIR} && ${MAKE} distclean)
- fi
-}
-
-# Several of the software packages tested do not support make check-install.
-# Those that support it should have a version of this function in their
-# override with the following lines:
-# TIMESTAMP "check-install $1"
-# ${MAKE} check-install $1
-CHECKINSTALL()
-{
- echo "check-install is not supported for ${SWVERSION}"
-}
-
-# Function for hdf4 and hdf5 to override to check in changes after snapshot.
-# Safety measure to avoid unintended checkins to other repositories.
-COMMITSNAPSHOT()
-{
- echo "original hdf5 script committed code changes back into git."
-}
-
-DISPLAYUSAGE()
-{
- set -
- cat <<EOF
-Usage: $PROGNAME [all] [checkout] [ftp <URL> [diff] [test] [srcdir] [release] [help]
- [clean] [distclean] [echo] [deploy <dir>] [deploydir <dir>]
- [zlib <zlib_path>] [releasedir <dir>] [srcdirname <dir>] [check-vfd]
- [check-passthrough-vol]
- [exec <command>] [module-load <module-list>] [op-configure <option>]
- [--<option>]
- all: Run all commands (checkout, test & release)
- [Default is all]
- checkout: Run source checkout
- diff: Run diff on current and previous versions. Exit 0 if
- no significant differences are found. Otherwise, non-zero.
- deploy: deploy binary to directory <dir>
- deploydir: use <dir> as the default directory for deployment
- test: Run test
- release: Run release
- clean: Run make clean
- distclean:Run make distclean
- echo: Turn on echo mode (set -x)
- setenv <name> <value>:
- Set environment variable <name> to <value>.
- setenvN <N> <name> <value> ...:
- Set environment variable with <N> values.
- E.g., setenvN 3 x a b c is same as setenv x="a b c".
- srcdir: Use srcdir option (does not imply other commands)
- "snapshot srcdir" is equivalent to "snapshot srcdir all"
- "snapshot srcdir checkout" is equivalent to "snapshot checkout"
- srcdirname <dir>:
- Use <dir> as the srcdir testing directory if srcdir is chosen.
- If <dir> starts with '-', it is append to the default name
- E.g., "snapshot srcdir srcdirname -xx" uses hostname-xx
- [Default is hostname]
- help: Print this message
- echo: Turn on shell echo
- zlib <zlib_path>:
- Use <zlib_path> as the ZLIB locations
- [Default is $ZLIB_default]
- releasedir <dir>:
- Use <dir> as the release directory
- [Default is $ReleaseDir_default]
- check-vfd:
- Run make check-vfd instead of just make check.
- check-passthrough-vol:
- Run make check-passthrough-vol instead of just make check.
- NOTE: Will only succeed with passthrough VOL connectors
- that use the native VOL connector as the terminal
- connector.
- exttest <testscript>;
- Run testscript;
- exec <command>:
- Run <command>;
- module-load <module-list>:
- Load modules in comma-separated <module-list>;
- op-configure <option>:
- Pass <option> to the configure command
- E.g., "snapshot op-configure --enable-parallel"
- configures for parallel mode
- --<option>:
- Pass --<option> to the configure command
- E.g., "snapshot --enable-parallel"
- configures for parallel mode
-EOF
- exit $errcode
-}
-
-# MAIN
-# SGI /bin/sh replaces $0 as function name if used in a function.
-# Set the name here to avoid that ambiguity and better style too.
-PROGNAME=$0
-SNAPSHOTNAME=
-HDFREPOS=
-DOCVERSION=""
-MODULELIST=""
-
-if [ -f bin/snapshot_params ]; then
- . bin/snapshot_params
- echo "Added snapshot_params."
-fi
-if [ -z "$SWVER" -a -f bin/snapshot_version ]
-then
- . bin/snapshot_version
- echo "Added snapshot_version."
-fi
-if [ -n ${HDFREPOS} -a -f bin/snapshot-${HDFREPOS}-overrides ]; then
- . bin/snapshot-${HDFREPOS}-overrides
- echo "Added snapshot-${HDFREPOS}-overrides."
-fi
-
-echo "====================================="
-echo "$PROGNAME $*"
-echo "====================================="
-TIMESTAMP MAIN
-uname -a
-
-# setup exit banner message
-trap EXIT_BANNER 0 1 2 9 15
-
-# Dump environment variables before option parsing
-echo ===Dumping environment variables before option parsing ===
-printenv | sort
-echo ===Done Dumping environment variables before option parsing ===
-
-# snapshots release directory. Default relative to $BASEDIR.
-ReleaseDir_default=release_dir
-
-# Where is the zlib library?
-# At NCSA, half of the machines have it in /usr/lib, the other half at
-# /usr/ncsa/lib. Leave it unset.
-ZLIB_default=
-ZLIB=$ZLIB_default
-
-# What compression methods to use? (md5 does checksum). Doc was apparently
-# added as a compression method to create a separate tarfile containing the
-# documentation files for v 1.8 and above.
-if [ "${SWVERSION}" = "hdf5_1_6" ]; then
- METHODS="gzip bzip2 md5"
-else
- METHODS="gzip bzip2 doc"
-fi
-
-# Use User's MAKE if set. Else use generic make.
-MAKE=${MAKE:-make}
-
-# Default check action.
-CHECKVAL=check
-
-#
-# Command options
-cmd="all"
-test_opt=""
-errcode=0
-AUTOGEN=""
-EXTTEST=""
-EXEC_CMD_ARG=""
-while [ $# -gt 0 ] ; do
- case "$1" in
- all)
- cmd="all"
- ;;
- checkout-autogen)
- cmdcheckout="checkout"
- AUTOGEN="autogen"
- cmd=""
- ;;
- checkout)
- cmdcheckout="checkout"
- cmd=""
- ;;
- ftp)
- echo "Setting ftp flags in snapshot script"
- cmdcheckout="checkout"
- cmdftp="ftp"
- cmd=""
- shift
- if [ $# -lt 1 ]; then
- echo "URL missing"
- errcode=1
- cmd="help"
- break
- fi
- ftp_url="$1"
- echo "ftp_url is $ftp_url"
- ;;
- diff)
- cmddiff="diff"
- cmd=""
- ;;
- deploy)
- # deploy the built binary.
- shift
- if [ $# -lt 1 ]; then
- echo "deploy <dir> missing"
- errcode=1
- cmd="help"
- break
- fi
- cmddeploy="deploy"
- DEPLOYDIRNAME="$1"
- ;;
- deploydir)
- # default directory for deployment.
- shift
- if [ $# -lt 1 ]; then
- echo "deploydir <dir> missing"
- errcode=1
- cmd="help"
- break
- fi
- deploydir="$1"
- ;;
- test)
- cmdtest="test"
- cmd=""
- ;;
- setenv)
- # set environment variable
- shift
- eval $1="$2"
- export $1
- shift
- ;;
- setenvN)
- # set environment variable with $1 values
- # e.g., setenvN 3 x a b c is same as setenv x="a b c".
- # a kludge now--the extra single quotes are needed
- # else eval complains.
- shift
- envN=$1
- shift
- envname=$1
- envalue=
- while test $envN -gt 0; do
- shift
- envalue="$envalue $1"
- envN=`expr $envN - 1`
- done
- eval $envname="'$envalue'"
- export $envname
- ;;
- srcdir)
- #use srcdir option for test
- srcdir="yes"
- ;;
- srcdirname)
- shift
- if [ $# -lt 1 ]; then
- echo "srcdirname <dir> missing"
- errcode=1
- cmd="help"
- break
- fi
- SRCDIRNAME="$1"
- ;;
- release)
- cmdrel="release"
- cmd=""
- ;;
- autogen-release)
- cmdrel="autogen-release"
- cmd=""
- ;;
- clean | distclean)
- cmdclean="$1"
- cmd=""
- ;;
- help)
- cmd="help"
- break
- ;;
- echo)
- set -x
- break
- ;;
- zlib)
- shift
- if [ $# -lt 1 ]; then
- echo "ZLIB information missing"
- errcode=1
- cmd="help"
- break
- fi
- ZLIB="$1"
- ;;
- releasedir)
- shift
- if [ $# -lt 1 ]; then
- echo "Release directory name missing"
- errcode=1
- cmd="help"
- break
- fi
- ReleaseDir="$1"
- ;;
- exttest)
- shift
- if [ $# -lt 1 ]; then
- echo "exttest script name missing"
- errcode=1
- cmd="help"
- break
- fi
- cmd=""
- EXTTEST="$1"
- ;;
- exec)
- shift
- if [ $# -lt 1 ]; then
- echo "exec command name missing"
- errcode=1
- cmd="help"
- break
- fi
- cmd=""
- EXEC_CMD_ARG="$@"
- # exit the parsing while loop since all arguments have been consumed.
- break
- ;;
- check-vfd)
- CHECKVAL=check-vfd
- ;;
- check-passthrough-vol)
- CHECKVAL=check-passthrough-vol
- ;;
- module-load)
- shift
- if [ $# -lt 1 ]; then
- echo "missing module list to load"
- errcode=1
- cmd="help"
- break
- fi
- MODULELIST="$1"
- ;;
- --*)
- OP_CONFIGURE="$OP_CONFIGURE $1"
- ;;
- op-configure)
- shift
- if [ $# -lt 1 ]; then
- echo "op-configure option missing"
- errcode=1
- cmd="help"
- break
- fi
- OP_CONFIGURE="$OP_CONFIGURE $1"
- ;;
- *)
- echo "Unknown option $1"
- errcode=1
- cmd="help"
- break
- ;;
- esac
- shift
-done
-
-if [ -n "$MODULELIST" ]; then
- . ~/.bashrc
- module use /opt/pkgs/modules/all
- # load module command will take a space separated list of modules.
- # If we have a comma separated list, convert ',' to ' '.
- MODULELIST="$( echo -e "$MODULELIST" | tr ',' ' ' )"
- module load $MODULELIST
-fi
-
-# Dump environment variables after option parsing
-echo ===Dumping environment variables after option parsing ===
-printenv | sort
-echo ===Done Dumping environment variables after option parsing ===
-
-if [ "$cmd" = help ]; then
- DISPLAYUSAGE
-fi
-
-# Setup the proper configure option (--with-zlib) to use zlib library
-# provide ZLIB is non-empty.
-ZLIB=${ZLIB:+"--with-zlib="$ZLIB}
-# Adding --prefix as a configure option will put the path to the deploy
-# directory in the initial libhdf5*.la files
-if [ -n "$DEPLOYDIRNAME" ]; then
- OP_CONFIGURE="$OP_CONFIGURE --prefix=${deploydir}/${DEPLOYDIRNAME}"
-fi
-CONFIGURE="configure $OP_CONFIGURE"
-# echo "Configure command is $CONFIGURE"
-
-# Execute the requests
-snapshot=yes
-
-BASEDIR=${HOME}/snapshots-${SNAPSHOTNAME}
-if [ ! -d ${BASEDIR} ]; then
- echo "BASEDIR ($BASEDIR) does not exist"
- exit 1
-fi
-
-CURRENT=${BASEDIR}/current
-PREVIOUS=${BASEDIR}/previous
-ReleaseDir=${ReleaseDir:=${BASEDIR}/${ReleaseDir_default}}
-HOSTNAME=`hostname | cut -f1 -d.` # no domain part
-
-# Try finding a version of diff that supports the -I option too.
-DIFF=diff
-for d in `echo $PATH | sed -e 's/:/ /g'` ; do
- test -x $d/diff && $d/diff -I XYZ /dev/null /dev/null > /dev/null 2>&1 &&
- DIFF=$d/diff && break
-done
-
-#=============================
-# Run source checkout
-#=============================
-if [ "$cmd" = "all" -o -n "$cmdcheckout" ]; then
- TIMESTAMP "checkout"
- # ${BASEDIR}/bin is now updated from git by EveningMaint or DailyMaint
- # to avoid updating the scripts in ${BASEDIR}/bin while they are running.
-
- if [ -z "$AUTOGEN" ]; then
- # If there is a Makefile in ${CURRENT}, the last test done in it
- # has not been distclean'ed. They would interfere with other
- # --srcdir build since make considers the files in ${CURRENT}
- # take precedence over files in its own build-directory. Run
- # a "make distclean" to clean them all out. This is not really
- # part of the "checkout" functions but this is the most convenient
- # spot to do the distclean. We will also continue the checkout process
- # regardless of the return code of distclean.
- ( cd ${CURRENT}; test -f Makefile && ${MAKE} distclean)
- fi
- # echo "cmdftp is $cmdftp; ftp_url is $ftp_url"
- if [ -n "$cmdftp" ]; then
- echo "Get the NetCDF4 source from their ftp server."
- echo "Command executed is: 2>&1 wget -N $ftp_url"
- cd ${BASEDIR};
- WGET_OUTPUT="`2>&1 wget -N $ftp_url`"
- errcode=$?
- if [[ $errcode -ne 0 ]]; then
- exit $errcode
- fi
-
- if [ $? -ne 0 ];then
- echo $0: "$WGET_OUTPUT" Exiting.
- exit 1
- fi
-
- # echo "Wget output was $WGET_OUTPUT"
-
- if echo "$WGET_OUTPUT" | fgrep 'not retrieving' &> /dev/null
- then
- echo "Snapshot unchanged"
- else
- echo "New snapshot downloaded"
- EXTRACT
- fi
- else
- SOURCE_CHECKOUT
- fi
-fi # Do source checkout
-
-
-#=============================
-# Run Test the HDF5 library
-#=============================
-if [ "$cmd" = "all" -o -n "$cmdtest" -o -n "$cmddiff" ]; then
- TIMESTAMP "Run Tests"
- # setup if srcdir is used.
- if [ -z "$srcdir" ]; then
- TESTDIR=${CURRENT}
- else
- #create TESTDIR if not exist yet
- case "$SRCDIRNAME" in
- "")
- SRCDIRNAME=$HOSTNAME
- ;;
- -*)
- SRCDIRNAME="$HOSTNAME$SRCDIRNAME"
- ;;
- esac
- TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
- test -d ${TESTDIR} || mkdir ${TESTDIR}
- # set TESTDIR to use the direct path to the local test directory
- # rather than the path through ${BASEDIR}.
- cd ${TESTDIR}
- TESTDIR=`pwd -P`
- cd ${CURRENT}
- fi
- # Make sure current version exists and is clean
- if [ -d ${TESTDIR} ]; then
- DISTCLEAN
- else
- errcode=$?
- snapshot=no
- exit $errcode
- fi
-
- # Compare it with the previous version. Compare only files listed in
- # the MANIFEST plus the MANIFEST itself.
- if [ -d ${PREVIOUS} ]; then
- if [ -z "${AUTOGEN}" ]; then
- CURRENTSRC=${CURRENT}
- else
- CURRENTSRC=${BASEDIR}/current_src
- fi
- if (${DIFF} -c ${PREVIOUS}/MANIFEST ${CURRENTSRC}/MANIFEST); then
- snapshot=no
- for src in `grep '^\.' ${CURRENTSRC}/MANIFEST|expand|cut -f1 -d' '`; do
- if ${DIFF} -I H5_VERS_RELEASE -I " released on " \
- -I " currently under development" \
- ${PREVIOUS}/$src ${CURRENTSRC}/$src
- then
- : #continue
- else
- snapshot=yes
- break
- fi
- done
- fi
- fi
-
- # if diff is chosen, exit 0 if no significant differences are found.
- # otherwise, exit 1. This includes cases of other failures.
- if [ -n "$cmddiff" ]; then
- if [ $snapshot = no ]; then
- exit 0
- else
- exit 1
- fi
- fi
-
- #=============================
- # Execute command if defined
- #=============================
- #echo BEFORE EXEC command
- #echo EXEC_CMD_ARG=${EXEC_CMD_ARG}
-
- if [ -n "$EXEC_CMD_ARG" ]; then
- TIMESTAMP ${EXEC_CMD_ARG}
- TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
- test -d ${TESTDIR} || mkdir ${TESTDIR}
- if cd ${TESTDIR}; then
- # clean up the directory before executing the command
- # Do we need to clean first?
- # rm -rf *
- #
- # If EXEC_CMD_ARG starts with a '/', it has an absolute path, else it is
- # relative to the BASEDIR.
- case "$EXEC_CMD_ARG" in
- /*)
- ${EXEC_CMD_ARG}
- ;;
- *)
- ${BASEDIR}/${EXEC_CMD_ARG}
- ;;
- esac
- errcode=$?
- else
- echo "${TESTDIR} not accessible"
- errcode=1
- fi
- # exit snapshot since nothing else to do, for now.
- exit $errcode
- fi
-
- # Build, run tests and install procedures
- if [ "$snapshot" = "yes" ] && [ "$NOMAKE" != "yes" ]; then
- FAIL_SECTION=""
- if [ -f ${TESTDIR}/failsection ]; then
- rm ${TESTDIR}/failsection
- fi
- if (cd ${TESTDIR} && \
- TIMESTAMP "configure" && echo "configure" > ${TESTDIR}/failsection && \
- RUNCONFIGURE && \
- sleep 2 && \
- TIMESTAMP "make" && echo "make" > ${TESTDIR}/failsection && \
- ${MAKE} && DISKUSAGE \
- TIMESTAMP ${CHECKVAL} && echo "make check" > ${TESTDIR}/failsection && \
- ${MAKE} ${CHECKVAL} && DISKUSAGE \
- TIMESTAMP "install" && echo "make install" > ${TESTDIR}/failsection && \
- ${MAKE} install && DISKUSAGE \
- TIMESTAMP "check-install" && echo "make check-install" > ${TESTDIR}/failsection && \
- CHECKINSTALL && DISKUSAGE \
- TIMESTAMP "uninstall" && echo "make uninstall" > ${TESTDIR}/failsection && \
- ${MAKE} uninstall && DISKUSAGE); then
- :
- else
- errcode=$?
- FAIL_SECTION=`cat ${TESTDIR}/failsection`
- echo "Failed running ${FAIL_SECTION}"
- snapshot=no
- exit $errcode
- fi
- elif [ $CPSRC ]; then
- cp -pr ${CURRENT}/* ${TESTDIR}
- else
- cmdclean=""
- fi
-fi # Test the HDF5 library
-
-# Run external test if configured
-
-#=============================
-#=============================
-#if [ -d "$CURRENT" ]; then
-if [ "$EXTTEST" != "" ]; then
- TIMESTAMP ${EXTTEST}
- TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
- test -d ${TESTDIR} || mkdir ${TESTDIR}
- cd ${TESTDIR}
- sleep 1
- TIMESTAMP $pwd
- ls
- ${BASEDIR}/${EXTTEST}
- errcode=$?
- exit $errcode
-fi
-
-#=============================
-# Run deployment if requested.
-#=============================
-if [ -n "$DEPLOYDIRNAME" ]; then
- # The daily tests deploy to .../hdf5/... or .../hdf4/... except on cobalt where the
- # deploy directory is in .../HDF5/... lc will take care of this. If hdf4 or hdf5
- # either upper or lower case isn't in the path, RELEASE.txt won't be found unless
- # it is in $CURRENT.
- POS4=`perl -e "print index(lc(\"${deploydir}/${DEPLOYDIRNAME}\"), 'hdf4')"`
- POS5=`perl -e "print index(lc(\"${deploydir}/${DEPLOYDIRNAME}\"), 'hdf5')"`
- if [ "${POS4}" -ge "0" ]; then
- RELEASE_TXT_LOC="release_notes"
- elif [ "${POS5}" -ge "0" ]; then
- RELEASE_TXT_LOC="release_docs"
- else
- RELEASE_TXT_LOC=""
- fi
-
- if [ "$snapshot" = "yes" ]; then
- TIMESTAMP "deploy"
- if (cd ${TESTDIR} &&
- ${BASEDIR}/bin/deploy ${deploydir}/${DEPLOYDIRNAME} && \
- TIMESTAMP "clean" && \
- ${MAKE} clean && \
- TIMESTAMP "check-install prefix=${deploydir}/${DEPLOYDIRNAME}" && \
- CHECKINSTALL prefix=${deploydir}/${DEPLOYDIRNAME}); then
- cd ${CURRENT}
- cp ${RELEASE_TXT_LOC}/RELEASE.txt ${deploydir}/${DEPLOYDIRNAME}
- cp COPYING ${deploydir}/${DEPLOYDIRNAME}
- #: #continue
- else
- errcode=$?
- exit $errcode
- fi
- fi
-fi # Deploy
-
-
-#=============================
-# Run Release snapshot, update version, and commit to source repository
-#=============================
-if [ "$cmd" = "all" -o -n "$cmdrel" ]; then
- if [ "$snapshot" = "yes" ]; then
- TIMESTAMP "release"
- DISTCLEAN
- (
- # Turn on exit on error in the sub-shell so that it does not
- # commit source if errors encounter here.
- set -e
- if [ "$cmdrel" = "autogen-release" ]; then
- cd ${BASEDIR}/current_src
- else
- cd ${CURRENT}
- fi
- if [ "$HDFREPOS" = "hdf4" ]; then
- RELEASE_VERSION="`perl bin/h4vers -v`"
- echo "Making snapshot release ($RELEASE_VERSION) to ${ReleaseDir}..."
- bin/release -d $ReleaseDir $METHODS
- perl bin/h4vers -i
- elif [ "$HDFREPOS" = "hdf5" ]; then
- RELEASE_VERSION="`perl bin/h5vers -v`"
- echo "Making snapshot release ($RELEASE_VERSION) to ${ReleaseDir}..."
- if [ "${DOCVERSION}" ]; then
- bin/release -d $ReleaseDir --docver ${DOCVERSION} $METHODS
- else
- bin/release -d $ReleaseDir $METHODS
- fi
- perl bin/h5vers -i
- else
- echo "need real release steps. For now, only move current version to previous"
- fi
- COMMITSNAPSHOT
- )
- errcode=$?
- fi
-
- # Replace the previous version with the current version.
- # Should check if the errcode of the release process but there
- # are other failures after release was done (e.g. h5vers or git failures)
- # that should allow the replacement to occur.
- rm -rf ${PREVIOUS}
- mv ${CURRENT} ${PREVIOUS}
-fi #Release snapshot
-
-
-#=============================
-# Clean the test area. Default is no clean.
-#=============================
-if [ -n "$cmdclean" ] && [ "$NOMAKE" != "yes" ]; then
- TIMESTAMP "clean"
- # setup if srcdir is used.
- if [ -z "$srcdir" ]; then
- TESTDIR=${CURRENT}
- else
- case "$SRCDIRNAME" in
- "")
- SRCDIRNAME=$HOSTNAME
- ;;
- -*)
- SRCDIRNAME="$HOSTNAME$SRCDIRNAME"
- ;;
- esac
- TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
- fi
- # clean it
- if (cd ${TESTDIR} && ${MAKE} $cmdclean ) then
- :
- else
- errcode=$?
- snapshot=no
- exit $errcode
- fi
-fi # Clean the Test directory
-
-exit $errcode
diff --git a/bin/snapshot_version b/bin/snapshot_version
deleted file mode 100644
index da2e190..0000000
--- a/bin/snapshot_version
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-
-# default version for snapshot test
-# H5VERSION matches with a source version symbolic name. Will test use the
-# latest revision of that branch. If set to "hdf5", it uses the main
-# version.
-# H5VER tells runtest which version to run
-H5VERSION=hdf5
diff --git a/bin/timekeeper b/bin/timekeeper
deleted file mode 100755
index 03bc8d5..0000000
--- a/bin/timekeeper
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/sh
-##
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-##
-# As a time keeper of the remote daily test process launched by runtest.
-# It sleeps for a certain time and then wakes up to hangup those processes
-# that are still around, assuming they have run too long.
-#
-# Programmer: Albert Cheng
-# Created Date: 2004/12/23
-
-# variable initialization
-waitminutes=300 # default to 5 hours == 300 minutes
-debugtimelimit=
-debugflag= # no debug
-
-# Function definitions
-#
-# PRINTMSG
-# Print a one line message left justified in a field of 70 characters
-# without newline. More output for this line later.
-#
-PRINTMSG() {
- SPACES=" "
- echo "$* $SPACES" | cut -c1-70 | tr -d '\012'
-}
-
-
-USAGE()
-{
- echo "Usage: %0 [-h] [-debug] [<time-limit>]"
- echo " Run timekeeper with <time-limit> minutes, default is $waitminutes."
- echo " If <time-limit> is in the form of HH:MM, it means wait till then."
- echo " -h print this help page"
- echo " -debug run debug mode"
-}
-
-
-ParseOption()
-{
- if [ $# -gt 0 -a "$1" = -h ]; then
- shift
- USAGE
- exit 0
- fi
- if [ $# -gt 0 -a "$1" = -debug ]; then
- shift
- debugflag=yes
- waitminutes=1 # use shorter time for debug
- fi
- if [ $# -gt 0 ]; then
- targettime=$1
- shift
-
- # find out it is minutes to wait or HH:MM to wake up
- case $targettime in
- *:*) # HH:MM
- currenttime=`date +%H:%M`
- currenthour=`echo $currenttime | cut -f1 -d:`
- currentminute=`echo $currenttime | cut -f2 -d:`
- targethour=`echo $targettime | cut -f1 -d:`
- targetminute=`echo $targettime | cut -f2 -d:`
- waitminutes=`expr \( $targethour - $currenthour \) \* 60 + $targetminute - $currentminute`
- if test $waitminutes -le 0; then
- # target time is in tomorrow, add 1 day of minutes
- waitminutes=`expr 24 \* 60 + $waitminutes`
- fi
- ;;
- *)
- waitminutes=$targettime
- ;;
- esac
- fi
-}
-
-
-# Main body
-echo "Timekeeper started at `date`"
-ParseOption $*
-waitperiod=`expr $waitminutes \* 60` # convert to seconds
-
-if [ -z "$debugflag" ]; then
- # normal time keeping mode
- # sleep first
- echo Timekeeper sleeping for $waitperiod seconds
- sleep $waitperiod
- # Look for any processes still around
- echo "Timekeeper woke up at `date`, looking for processes to terminate..."
- for x in PID.* ; do
- if [ -f $x ]; then
- pid=`cat $x`
- # check if process is still around
- if test X$pid \!= X && ps -p $pid > /dev/null; then
- echo "terminating process $x ($pid)"
- kill -HUP $pid
- echo "Remote shell command ended. But some processes might still be"
- echo "running in the remote machine. Login there to verify."
- fi
- fi
- done
-else
- # Debug mode. Launch two rsh process, one ends before, the other after
- # waitperiod. Must launch timekeeper from a subshell, else the debug
- # will wait for it too.
- myhostname=`hostname`
- ( $0 $waitminutes &)
- debugtimelimit=`expr $waitperiod - 10`
- echo rsh $myhostname sleep $debugtimelimit
- rsh $myhostname sleep $debugtimelimit &
- echo $! > PID.before
- debugtimelimit=`expr $waitperiod + 10`
- echo rsh $myhostname sleep $debugtimelimit
- rsh $myhostname sleep $debugtimelimit &
- echo $! > PID.after
-
- wait
- rm PID.before PID.after
-fi
-
-echo "Timekeeper ended at `date`"
diff --git a/c++/src/H5AbstractDs.cpp b/c++/src/H5AbstractDs.cpp
index eeb0155..70d8531 100644
--- a/c++/src/H5AbstractDs.cpp
+++ b/c++/src/H5AbstractDs.cpp
@@ -302,7 +302,7 @@ AbstractDs::getStrType() const
//--------------------------------------------------------------------------
// Function: AbstractDs::getVarLenType
-///\brief Returns the floating-point datatype of this abstract dataset,
+///\brief Returns the variable length datatype of this abstract dataset,
/// which can be a dataset or an attribute.
///\return VarLenType instance
///\exception H5::DataTypeIException
diff --git a/c++/src/H5Attribute.cpp b/c++/src/H5Attribute.cpp
index 520a4f6..a0aa33f 100644
--- a/c++/src/H5Attribute.cpp
+++ b/c++/src/H5Attribute.cpp
@@ -11,6 +11,7 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include <cstdlib>
#include <iostream>
#include <string>
diff --git a/c++/src/H5DataSet.cpp b/c++/src/H5DataSet.cpp
index 627d81b..68ddefa 100644
--- a/c++/src/H5DataSet.cpp
+++ b/c++/src/H5DataSet.cpp
@@ -11,6 +11,7 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include <cstdlib>
#include <iostream>
#include <string>
diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp
index ff8f6dc..d889f13 100644
--- a/c++/src/H5DataType.cpp
+++ b/c++/src/H5DataType.cpp
@@ -11,6 +11,7 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#include <cstdlib>
#include <iostream>
#include <string>
diff --git a/c++/src/H5FaccProp.cpp b/c++/src/H5FaccProp.cpp
index a79ada3..68a130e 100644
--- a/c++/src/H5FaccProp.cpp
+++ b/c++/src/H5FaccProp.cpp
@@ -17,8 +17,6 @@
using std::cerr;
using std::endl;
-//#include <string>
-
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
@@ -156,7 +154,7 @@ FileAccPropList::getDriver() const
// Function: FileAccPropList::setDriver
///\brief Set file driver for this property list.
///\param new_driver_id - IN: File driver
-///\param new_driver_info - IN: Struct containing the driver-specific properites
+///\param new_driver_info - IN: Struct containing the driver-specific properties
///\exception H5::PropListIException
///\par Description
/// For information, please refer to the H5Pset_driver API in
diff --git a/c++/src/H5Library.cpp b/c++/src/H5Library.cpp
index 37516be..19c7ee7 100644
--- a/c++/src/H5Library.cpp
+++ b/c++/src/H5Library.cpp
@@ -11,8 +11,8 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <string>
#include <cstdlib>
+#include <string>
#include "H5CppDoc.h" // included only for Doxygen to generate part of RM
#include "H5Include.h"
diff --git a/c++/src/H5Location.cpp b/c++/src/H5Location.cpp
index 13a89aa..915f2a9 100644
--- a/c++/src/H5Location.cpp
+++ b/c++/src/H5Location.cpp
@@ -11,8 +11,8 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <string>
#include <iostream>
+#include <string>
using namespace std;
#include "H5Include.h"
diff --git a/c++/src/H5Location.h b/c++/src/H5Location.h
index e990ec8..0aec4d2 100644
--- a/c++/src/H5Location.h
+++ b/c++/src/H5Location.h
@@ -334,7 +334,7 @@ class H5_DLLCPP H5Location : public IdComponent {
#endif // DOXYGEN_SHOULD_SKIP_THIS
// Noop destructor.
- virtual ~H5Location();
+ virtual ~H5Location() override;
}; // end of H5Location
} // namespace H5
diff --git a/c++/src/H5PropList.cpp b/c++/src/H5PropList.cpp
index e7a83af..d4e7b39 100644
--- a/c++/src/H5PropList.cpp
+++ b/c++/src/H5PropList.cpp
@@ -12,7 +12,6 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <iostream>
-
#include <string>
#include "H5Include.h"
diff --git a/c++/src/cpp_doc_config b/c++/src/cpp_doc_config
index 3eb7645..1e26187 100644
--- a/c++/src/cpp_doc_config
+++ b/c++/src/cpp_doc_config
@@ -38,7 +38,7 @@ PROJECT_NAME =
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = "1.13.1-1, currently under development"
+PROJECT_NUMBER = "1.13.2-1, currently under development"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/c++/test/dsets.cpp b/c++/test/dsets.cpp
index 53b56fc..55ffd67 100644
--- a/c++/test/dsets.cpp
+++ b/c++/test/dsets.cpp
@@ -412,7 +412,6 @@ const H5Z_class2_t H5Z_BOGUS[1] = {{
static size_t
filter_bogus(unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[], size_t nbytes,
size_t *buf_size, void **buf)
-// H5_ATTR_UNUSED variables caused warning, but taking them out caused failure.
{
// Unused
(void)flags;
diff --git a/c++/test/tattr.cpp b/c++/test/tattr.cpp
index 26699d2..dc968f9 100644
--- a/c++/test/tattr.cpp
+++ b/c++/test/tattr.cpp
@@ -95,7 +95,7 @@ struct attr4_struct {
const H5std_string ATTR5_NAME("Attr5");
const int ATTR5_RANK = 0;
-float attr_data5 = -5.123f; // Test data for 5th attribute
+float attr_data5 = -5.123F; // Test data for 5th attribute
/* Info for another attribute */
const H5std_string ATTR1A_NAME("Attr1_a");
@@ -1782,7 +1782,7 @@ test_attr_dense_create(FileCreatPropList &fcpl, FileAccPropList &fapl)
unsigned attr_num;
for (attr_num = 0; attr_num < max_compact; attr_num++) {
// Create attribute
- sprintf(attr_name, "attr %02u", attr_num);
+ snprintf(attr_name, sizeof(attr_name), "attr %02u", attr_num);
Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
// Write data to the attribute
@@ -1794,7 +1794,7 @@ test_attr_dense_create(FileCreatPropList &fcpl, FileAccPropList &fapl)
{ // Add one more attribute, to push into "dense" storage
// Create another attribute
- sprintf(attr_name, "attr %02u", attr_num);
+ snprintf(attr_name, sizeof(attr_name), "attr %02u", attr_num);
Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
// Write data to the attribute
@@ -1804,7 +1804,7 @@ test_attr_dense_create(FileCreatPropList &fcpl, FileAccPropList &fapl)
// Attempt to add attribute again, which should fail
try {
// Create another attribute
- sprintf(attr_name, "attr %02u", attr_num);
+ snprintf(attr_name, sizeof(attr_name), "attr %02u", attr_num);
Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
// continuation here, that means no exception has been thrown
diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp
index 5808136..461d258 100644
--- a/c++/test/th5s.cpp
+++ b/c++/test/th5s.cpp
@@ -73,7 +73,7 @@ struct space4_struct {
unsigned u;
float f;
char c2;
-} space4_data = {'v', 987123, -3.14f, 'g'}; /* Test data for 4th dataspace */
+} space4_data = {'v', 987123, -3.14F, 'g'}; /* Test data for 4th dataspace */
/* Null dataspace */
int space5_data = 7;
diff --git a/c++/test/titerate.cpp b/c++/test/titerate.cpp
index b6a9436..57761ea 100644
--- a/c++/test/titerate.cpp
+++ b/c++/test/titerate.cpp
@@ -160,7 +160,7 @@ test_iter_group(FileAccPropList &fapl)
DataSpace filespace;
for (int i = 0; i < NDATASETS; i++) {
- sprintf(name, "Dataset %d", i);
+ snprintf(name, sizeof(name), "Dataset %d", i);
// Create a dataset in the file
DataSet dataset = file.createDataSet(name, datatype, filespace);
@@ -351,7 +351,7 @@ test_iter_group(FileAccPropList &fapl)
* Purpose Open an attribute and verify that it has a the correct name
*-------------------------------------------------------------------------
*/
-const H5std_string FILE_NAME("titerate.h5");
+const H5std_string FILE_NAME("test_member_access.h5");
const H5std_string GRP_NAME("/Group_A");
const H5std_string FDATASET_NAME("file dset");
const H5std_string GDATASET_NAME("group dset");
@@ -396,6 +396,9 @@ test_HDFFV_9920()
int attr_data[2] = {100, 200};
hsize_t dims[1] = {DIM1};
+ /* Output message about test being performed */
+ SUBTEST("Member access");
+
try {
// Create a new file and a group in it
H5File file(FILE_NAME, H5F_ACC_TRUNC);
@@ -425,6 +428,7 @@ test_HDFFV_9920()
printelems(file, FDATASET_NAME, FATTR_NAME);
printelems(gr1, GDATASET_NAME, GATTR_NAME);
+ PASSED();
} // end of try block
// Catch all failures for handling in the same way
@@ -473,4 +477,5 @@ extern "C" void
cleanup_iterate()
{
HDremove(FILE_ITERATE.c_str());
+ HDremove(FILE_NAME.c_str());
} // cleanup_iterate
diff --git a/c++/test/tvlstr.cpp b/c++/test/tvlstr.cpp
index c91b566..405ca07 100644
--- a/c++/test/tvlstr.cpp
+++ b/c++/test/tvlstr.cpp
@@ -917,32 +917,32 @@ test_vl_rewrite()
int i;
char name[256]; // Buffer for names & data
for (i = 0; i < REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
+ snprintf(name, sizeof(name), "/set_%d", i);
write_scalar_dset(file1, type, space, name, name);
}
// Effectively copy data from file 1 to 2.
for (i = 0; i < REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
+ snprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file1, type, space, name, name);
write_scalar_dset(file2, type, space, name, name);
}
// Read back from file 2.
for (i = 0; i < REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
+ snprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file2, type, space, name, name);
}
// Remove from file 2.
for (i = 0; i < REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
+ snprintf(name, sizeof(name), "/set_%d", i);
file2.unlink(name);
}
// Effectively copy from file 1 to file 2.
for (i = 0; i < REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
+ snprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file1, type, space, name, name);
write_scalar_dset(file2, type, space, name, name);
}
diff --git a/config/apple b/config/apple
index 98dede2..2da7e93 100644
--- a/config/apple
+++ b/config/apple
@@ -27,8 +27,14 @@ if test "X-" = "X-$CC"; then
CC_BASENAME=gcc
;;
*)
- CC=clang
- CC_BASENAME=clang
+ if test "X-$enable_parallel" = "X-yes"; then
+ # default to use mpicc which is the defacto MPI compiler name
+ CC=mpicc
+ CC_BASENAME=mpicc
+ else
+ CC=clang
+ CC_BASENAME=clang
+ fi
# Production
PROD_CFLAGS="-O3"
@@ -47,9 +53,11 @@ if test "X-" = "X-$CC"; then
esac
fi
-# Figure out compiler flags
+# Figure out C compiler flags
. $srcdir/config/gnu-flags
. $srcdir/config/clang-flags
+. $srcdir/config/intel-flags
+
# temp patch: if GCC 4.2.1 is used in Lion or Mountain Lion systems, do not
# use -O option as it causes failures in test/dt_arith.
case "$host_os" in
@@ -65,15 +73,9 @@ case "$host_os" in
esac
;;
esac
-
-. $srcdir/config/intel-flags
+
if test "X-" = "X-$FC"; then
case $CC_BASENAME in
- clang)
- # clang has no fortran compiler. Use gfortran.
- FC=gfortran
- FC_BASENAME=gfortran
- ;;
gcc*)
FC=gfortran
FC_BASENAME=gfortran
@@ -82,15 +84,26 @@ if test "X-" = "X-$FC"; then
FC=ifort
FC_BASENAME=ifort
;;
+ mpicc*)
+ FC=mpif90
+ FC_BASENAME=mpif90
+ ;;
+ clang)
+ # clang has no fortran compiler. Use gfortran.
+ FC=gfortran
+ FC_BASENAME=gfortran
+ ;;
esac
fi
+# Figure out FORTRAN compiler flags
+. $srcdir/config/gnu-fflags
+. $srcdir/config/intel-fflags
+
+
+# The default C++ compiler is `clang++'.
if test "X-" = "X-$CXX"; then
case $CC_BASENAME in
- clang)
- CXX=clang++
- CXX_BASENAME=clang++
- ;;
gcc)
CXX=g++
CXX_BASENAME=g++
@@ -99,18 +112,21 @@ if test "X-" = "X-$CXX"; then
CXX=icpc
CXX_BASENAME=icpc
;;
+ mpicc*)
+ FC=mpif90
+ FC_BASENAME=mpif90
+ ;;
+ clang)
+ CXX=clang++
+ CXX_BASENAME=clang++
+ ;;
esac
fi
-case $CXX_BASENAME in
- clang++)
- PROD_CXXFLAGS="-O3"
- DEBUG_CXXFLAGS="-g -O0"
- # Use this for profiling with gprof
- # Just "-g" for now. More later.
- PROFILE_CXXFLAGS="-g"
- ;;
-esac
+# Figure out C++ compiler flags
+. $srcdir/config/intel-cxxflags # Do this ahead of GNU to avoid icpc being detected as g++
+. $srcdir/config/gnu-cxxflags
+. $srcdir/config/clang-cxxflags
# compiler version strings
case $CC in
@@ -133,16 +149,15 @@ case $CC in
echo "No match to get cc_version_info for $CC"
;;
esac
+
# Figure out Fortran compiler flags and version strings
case $FC in
*gfortran*)
- . $srcdir/config/gnu-fflags
fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS --version 2>&1 |\
grep 'GCC' | sed 's/\(.*(GCC) [-a-z0-9\. ]*\).*/\1/'`
;;
*ifc*|*ifort*)
- . $srcdir/config/intel-fflags
fc_version_info=`$FC $FCFLAGS $H5_FCFLAGS -V 2>&1 | grep 'Version' |\
sed 's/\(Intel.* Compiler\).*\( Version [a-z0-9\.]*\).*\( Build [0-9]*\)/\1\2\3/'`
;;
@@ -156,13 +171,11 @@ esac
# get c++ version info
case $CXX in
clang++)
- . $srcdir/config/clang-cxxflags
cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS --version 2>&1 |\
grep 'Apple' | sed 's/(.*//'`
;;
*g++*)
- . $srcdir/config/gnu-cxxflags
cxx_version_info=`$CXX $CXXFLAGS $H5_CXXFLAGS --version 2>&1 |\
grep 'GCC' | sed 's/.*\((GCC) [-a-z0-9\. ]*.*\)/\1/'`
;;
diff --git a/config/clang-warnings/developer-general b/config/clang-warnings/developer-general
index 74d8404..b80552f 100644
--- a/config/clang-warnings/developer-general
+++ b/config/clang-warnings/developer-general
@@ -1,4 +1,3 @@
--Wformat-nonliteral
-Wmissing-noreturn
-Wsometimes-uninitialized
-Wswitch-enum
diff --git a/config/clang-warnings/general b/config/clang-warnings/general
index f0c9b93..433dff9 100644
--- a/config/clang-warnings/general
+++ b/config/clang-warnings/general
@@ -8,19 +8,13 @@
-Wformat=2
-Wframe-larger-than=16384
-Wimplicit-fallthrough
-#
-# NOTE: Due to the divergence in the C and C++, we're dropping support for
-# compiling the C library with a C++ compiler and dropping the -Wc++-compat
-# warning.
-#
--Wno-c++-compat
-#
-# NOTE: Disable the -Wformat-nonliteral from -Wformat=2 here and re-add
-# it to the developer flags.
-#
--Wno-format-nonliteral
-Wnull-dereference
-Wunused-const-variable
-Wwrite-strings
-Wpedantic
-Wvolatile-register-var
+# NOTE: Due to the divergence in the C and C++, we're dropping support for
+# compiling the C library with a C++ compiler and dropping the -Wc++-compat
+# warning.
+#
+-Wno-c++-compat
diff --git a/config/cmake/CTestCustom.cmake b/config/cmake/CTestCustom.cmake
index f958804..9517e4b 100644
--- a/config/cmake/CTestCustom.cmake
+++ b/config/cmake/CTestCustom.cmake
@@ -19,6 +19,8 @@ set (CTEST_CUSTOM_MAXIMUM_FAILED_TEST_OUTPUT_SIZE 50000)
set (CTEST_CUSTOM_WARNING_EXCEPTION
${CTEST_CUSTOM_WARNING_EXCEPTION}
"note.*expected.*void.*but argument is of type.*volatile"
+ "plugin-build.*:[ \t]*warning"
+ "CMake Warning*stamp"
"src.ZLIB.*:[ \t]*warning"
"warning LNK4197:.*ZLIB-prefix"
"src.SZIP.*:[ \t]*warning"
diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in
index 1f7f4b1..4956c97 100644
--- a/config/cmake/H5pubconf.h.in
+++ b/config/cmake/H5pubconf.h.in
@@ -265,6 +265,9 @@
/* Define if we have parallel support */
#cmakedefine H5_HAVE_PARALLEL @H5_HAVE_PARALLEL@
+/* Define if we have support for writing to filtered datasets in parallel */
+#cmakedefine H5_HAVE_PARALLEL_FILTERED_WRITES @H5_HAVE_PARALLEL_FILTERED_WRITES@
+
/* Define if both pread and pwrite exist. */
#cmakedefine H5_HAVE_PREADWRITE @H5_HAVE_PREADWRITE@
diff --git a/config/cmake/HDF5PluginMacros.cmake b/config/cmake/HDF5PluginMacros.cmake
index 4e05399..dbed15c 100644
--- a/config/cmake/HDF5PluginMacros.cmake
+++ b/config/cmake/HDF5PluginMacros.cmake
@@ -57,8 +57,8 @@ macro (EXTERNAL_PLUGIN_LIBRARY compress_type)
add_dependencies (h5ex_d_mafisc ${HDF5_LIBSH_TARGET})
target_include_directories (h5ex_d_mafisc PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR}")
endif ()
- if (ENABLE_SZF)
- add_dependencies (h5szf ${HDF5_LIBSH_TARGET})
+ if (ENABLE_SZ)
+ add_dependencies (h5sz ${HDF5_LIBSH_TARGET})
add_dependencies (h5ex_d_sz ${HDF5_LIBSH_TARGET})
target_include_directories (h5ex_d_sz PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR}")
endif ()
diff --git a/config/cmake/HDF5_Examples.cmake.in b/config/cmake/HDF5_Examples.cmake.in
index db638fd..795399e 100644
--- a/config/cmake/HDF5_Examples.cmake.in
+++ b/config/cmake/HDF5_Examples.cmake.in
@@ -81,14 +81,13 @@ set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDN
#endif()
###############################################################################################################
+set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ROOT:PATH=${INSTALLDIR}")
if(WIN32)
set(SITE_OS_NAME "Windows")
- set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
else()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
set(ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
diff --git a/config/cmake/HDFFortranCompilerFlags.cmake b/config/cmake/HDFFortranCompilerFlags.cmake
index 754259e..84b3ebe 100644
--- a/config/cmake/HDFFortranCompilerFlags.cmake
+++ b/config/cmake/HDFFortranCompilerFlags.cmake
@@ -62,7 +62,7 @@ if (NOT MSVC AND NOT MINGW)
# General flags
if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/ifort-general")
- list (APPEND HDF5_CMAKE_Fortran_FLAGS "-stand:f03" "-free")
+ list (APPEND HDF5_CMAKE_Fortran_FLAGS "-stand f03" "-free")
elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-general")
list (APPEND HDF5_CMAKE_Fortran_FLAGS "-ffree-form" "-fimplicit-none")
@@ -118,8 +118,8 @@ if (NOT MSVC AND NOT MINGW)
endif ()
else ()
if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
- #ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-ifort-general")
- list (APPEND HDF5_CMAKE_Fortran_FLAGS "/warn:all" "/stand:f03" "/free")
+ ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/win-ifort-general")
+ list (APPEND HDF5_CMAKE_Fortran_FLAGS "/stand:f03" "/free")
endif ()
endif ()
diff --git a/config/cmake/README.txt.cmake.in b/config/cmake/README.md.cmake.in
index 9289870..b60e729 100644
--- a/config/cmake/README.txt.cmake.in
+++ b/config/cmake/README.md.cmake.in
@@ -14,7 +14,7 @@ It was built with the following options:
The contents of this directory are:
COPYING - Copyright notice
- README.txt - This file
+ README.md - This file
@HDF5_PACKAGE_NAME@-@HDF5_PACKAGE_VERSION@-@BINARY_SYSTEM_NAME@.@BINARY_INSTALL_ENDING@ - HDF5 Install Package
This binary was built with the ZLIB and SZIP/Libaec external libraries and are
diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake
index e423deb..221c0c9 100644
--- a/config/cmake/cacheinit.cmake
+++ b/config/cmake/cacheinit.cmake
@@ -47,12 +47,12 @@ set (HDF5_MINGW_STATIC_GCC_LIBS ON CACHE BOOL "Statically link libgcc/libstdc++"
set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT TGZ)" FORCE)
set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT TGZ)
-set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE)
+set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use HDF5_ZLib from compressed file" FORCE)
set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE)
set (SZAEC_TGZ_NAME "LIBAEC.tar.gz" CACHE STRING "Use SZip AEC from compressed file" FORCE)
set (USE_LIBAEC ON CACHE BOOL "Use libaec szip replacement" FORCE)
-set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE)
+set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of HDF5_ZLIB package" FORCE)
set (LIBAEC_PACKAGE_NAME "libaec" CACHE STRING "Name of AEC SZIP package" FORCE)
set (SZIP_PACKAGE_NAME "szip" CACHE STRING "Name of SZIP package" FORCE)
@@ -85,12 +85,12 @@ set (BLOSC_TGZ_NAME "c-blosc.tar.gz" CACHE STRING "Use BLOSC from compressed fil
set (BLOSC_PACKAGE_NAME "blosc" CACHE STRING "Name of BLOSC package" FORCE)
-set (ZLIB_GIT_URL "https://git@bitbucket.hdfgroup.org/scm/test/zlib.git" CACHE STRING "Use ZLIB from HDF repo" FORCE)
-set (ZLIB_GIT_BRANCH "master" CACHE STRING "" FORCE)
+set (BLOSC_ZLIB_GIT_URL "https://git@bitbucket.hdfgroup.org/scm/test/zlib.git" CACHE STRING "Use BLOSC_ZLIB from HDF repo" FORCE)
+set (BLOSC_ZLIB_GIT_BRANCH "master" CACHE STRING "" FORCE)
-set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE)
+set (BLOSC_ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use BLOSC_ZLib from compressed file" FORCE)
-set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE)
+set (BLOSC_ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of BLOSC_ZLIB package" FORCE)
#######
# bzip2
@@ -159,15 +159,15 @@ set (LZF_PACKAGE_NAME "lzf" CACHE STRING "Name of LZF package" FORCE)
#set (MAFISC_PACKAGE_NAME "mafisc" CACHE STRING "Name of MAFISC package" FORCE)
######
-# szf
+# sz
######
-set (SZF_GIT_URL "https://github.com/disheng222/SZ" CACHE STRING "Use SZ from github repository" FORCE)
-set (SZF_GIT_BRANCH "master" CACHE STRING "" FORCE)
+set (SZ_GIT_URL "https://github.com/disheng222/SZ" CACHE STRING "Use SZ filter from github repository" FORCE)
+set (SZ_GIT_BRANCH "master" CACHE STRING "" FORCE)
-set (SZF_TGZ_NAME "szf.tar.gz" CACHE STRING "Use SZ from compressed file" FORCE)
+set (SZ_TGZ_NAME "szf.tar.gz" CACHE STRING "Use SZ filter from compressed file" FORCE)
-set (SZF_PACKAGE_NAME "szf" CACHE STRING "Name of SZ package" FORCE)
+set (SZ_PACKAGE_NAME "SZ" CACHE STRING "Name of SZ filter package" FORCE)
######
# zfp
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index eab09ba..35cee4f 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -68,8 +68,8 @@ endif ()
if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
set (${HDF5_PACKAGE_NAME}_JAVA_INCLUDE_DIRS
@PACKAGE_CURRENT_BUILD_DIR@/lib/jarhdf5-@HDF5_VERSION_STRING@.jar
- @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.25.jar
- @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.25.jar
+ @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.33.jar
+ @PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.33.jar
)
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARY "@PACKAGE_CURRENT_BUILD_DIR@/lib")
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARIES "${${HDF5_PACKAGE_NAME}_JAVA_LIBRARY}")
@@ -116,7 +116,7 @@ if (NOT TARGET "@HDF5_PACKAGE@")
include (@PACKAGE_SHARE_INSTALL_DIR@/@ZLIB_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake)
endif ()
if (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT AND ${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS)
- include (@PACKAGE_SHARE_INSTALL_DIR@/@SZ_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake)
+ include (@PACKAGE_SHARE_INSTALL_DIR@/@SZIP_PACKAGE_NAME@@HDF_PACKAGE_EXT@-targets.cmake)
endif ()
include (@PACKAGE_SHARE_INSTALL_DIR@/@HDF5_PACKAGE@@HDF_PACKAGE_EXT@-targets.cmake)
endif ()
diff --git a/config/cmake/libh5cc.in b/config/cmake/libh5cc.in
index ecdd13e..f5d8d4c 100644
--- a/config/cmake/libh5cc.in
+++ b/config/cmake/libh5cc.in
@@ -27,6 +27,27 @@ prg=$dir/$(basename -- "$prg") || exit
printf '%s\n' "$prg"
printf 'dir is %s\n' "$dir"
+
+# Show the configuration summary of the library recorded in the
+# libhdf5.settings file reside in the lib directory.
+showconfigure()
+{
+ cat $dir/lib/libhdf5.settings
+ status=$?
+}
+
export PKG_CONFIG_PATH=$dir/lib/pkgconfig
-@_PKG_CONFIG_COMPILER@ $@ `pkg-config --define-variable=prefix=$dir --cflags --libs @_PKG_CONFIG_LIBNAME@`
+for arg in $@ ; do
+ case "$arg" in
+ -showconfig)
+ showconfigure
+ exit $status
+ ;;
+ *)
+ @_PKG_CONFIG_COMPILER@ $@ `pkg-config --define-variable=prefix=$dir --cflags --libs @_PKG_CONFIG_LIBNAME@`
+ status=$?
+ exit $status
+ ;;
+ esac
+done
diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake
index 2f5af77..9a3ead2 100644
--- a/config/cmake/scripts/HDF5config.cmake
+++ b/config/cmake/scripts/HDF5config.cmake
@@ -37,7 +37,7 @@ cmake_minimum_required (VERSION 3.12)
# CTEST_SOURCE_NAME - source folder
##############################################################################
-set (CTEST_SOURCE_VERSION "1.13.1")
+set (CTEST_SOURCE_VERSION "1.13.2")
set (CTEST_SOURCE_VERSEXT "-1")
##############################################################################
@@ -68,7 +68,7 @@ endif ()
# build generator must be defined
if (NOT DEFINED BUILD_GENERATOR)
- message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2019, VS201964, VS2017, or VS201764, VS2015, VS201564")
+ message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2019, VS201964, VS2017, VS201764, VS2015, VS201564")
endif ()
###################################################################
diff --git a/config/cmake_ext_mod/ConfigureChecks.cmake b/config/cmake_ext_mod/ConfigureChecks.cmake
index 6a9c274..6242b12 100644
--- a/config/cmake_ext_mod/ConfigureChecks.cmake
+++ b/config/cmake_ext_mod/ConfigureChecks.cmake
@@ -223,65 +223,63 @@ set (LINUX_LFS 0)
set (HDF_EXTRA_C_FLAGS)
set (HDF_EXTRA_FLAGS)
if (MINGW OR NOT WINDOWS)
- # Might want to check explicitly for Linux and possibly Cygwin
- # instead of checking for not Solaris or Darwin.
- if (NOT ${HDF_PREFIX}_HAVE_SOLARIS AND NOT ${HDF_PREFIX}_HAVE_DARWIN)
- # Linux Specific flags
- # This was originally defined as _POSIX_SOURCE which was updated to
- # _POSIX_C_SOURCE=199506L to expose a greater amount of POSIX
- # functionality so clock_gettime and CLOCK_MONOTONIC are defined
- # correctly. This was later updated to 200112L so that
- # posix_memalign() is visible for the direct VFD code on Linux
- # systems.
- # POSIX feature information can be found in the gcc manual at:
- # http://www.gnu.org/s/libc/manual/html_node/Feature-Test-Macros.html
- set (HDF_EXTRA_C_FLAGS -D_POSIX_C_SOURCE=200809L)
-
- # Need to add this so that O_DIRECT is visible for the direct
- # VFD on Linux systems.
- set (HDF_EXTRA_C_FLAGS ${HDF_EXTRA_C_FLAGS} -D_GNU_SOURCE)
-
- option (HDF_ENABLE_LARGE_FILE "Enable support for large (64-bit) files on Linux." ON)
- if (HDF_ENABLE_LARGE_FILE AND NOT DEFINED TEST_LFS_WORKS_RUN)
- set (msg "Performing TEST_LFS_WORKS")
- try_run (TEST_LFS_WORKS_RUN TEST_LFS_WORKS_COMPILE
- ${CMAKE_BINARY_DIR}
- ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
- COMPILE_DEFINITIONS "-DTEST_LFS_WORKS"
- )
+ if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+ # Linux Specific flags
+ # This was originally defined as _POSIX_SOURCE which was updated to
+ # _POSIX_C_SOURCE=199506L to expose a greater amount of POSIX
+ # functionality so clock_gettime and CLOCK_MONOTONIC are defined
+ # correctly. This was later updated to 200112L so that
+ # posix_memalign() is visible for the direct VFD code on Linux
+ # systems.
+ # POSIX feature information can be found in the gcc manual at:
+ # http://www.gnu.org/s/libc/manual/html_node/Feature-Test-Macros.html
+ set (HDF_EXTRA_C_FLAGS -D_POSIX_C_SOURCE=200809L)
+
+ # Need to add this so that O_DIRECT is visible for the direct
+ # VFD on Linux systems.
+ set (HDF_EXTRA_C_FLAGS ${HDF_EXTRA_C_FLAGS} -D_GNU_SOURCE)
+
+ option (HDF_ENABLE_LARGE_FILE "Enable support for large (64-bit) files on Linux." ON)
+ if (HDF_ENABLE_LARGE_FILE AND NOT DEFINED TEST_LFS_WORKS_RUN)
+ set (msg "Performing TEST_LFS_WORKS")
+ try_run (TEST_LFS_WORKS_RUN TEST_LFS_WORKS_COMPILE
+ ${CMAKE_BINARY_DIR}
+ ${HDF_RESOURCES_EXT_DIR}/HDFTests.c
+ COMPILE_DEFINITIONS "-DTEST_LFS_WORKS"
+ )
- # The LARGEFILE definitions were from the transition period
- # and are probably no longer needed. The FILE_OFFSET_BITS
- # check should be generalized for all POSIX systems as it
- # is in the Autotools.
- if (TEST_LFS_WORKS_COMPILE)
- if (TEST_LFS_WORKS_RUN MATCHES 0)
- set (TEST_LFS_WORKS 1 CACHE INTERNAL ${msg})
- set (LARGEFILE 1)
- set (HDF_EXTRA_FLAGS ${HDF_EXTRA_FLAGS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE)
- if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSE "${msg}... yes")
+ # The LARGEFILE definitions were from the transition period
+ # and are probably no longer needed. The FILE_OFFSET_BITS
+ # check should be generalized for all POSIX systems as it
+ # is in the Autotools.
+ if (TEST_LFS_WORKS_COMPILE)
+ if (TEST_LFS_WORKS_RUN MATCHES 0)
+ set (TEST_LFS_WORKS 1 CACHE INTERNAL ${msg})
+ set (LARGEFILE 1)
+ set (HDF_EXTRA_FLAGS ${HDF_EXTRA_FLAGS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE)
+ if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
+ message (VERBOSE "${msg}... yes")
+ endif ()
+ else ()
+ set (TEST_LFS_WORKS "" CACHE INTERNAL ${msg})
+ if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
+ message (VERBOSE "${msg}... no")
+ endif ()
+ file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "Test TEST_LFS_WORKS Run failed with the following exit code:\n ${TEST_LFS_WORKS_RUN}\n"
+ )
endif ()
else ()
set (TEST_LFS_WORKS "" CACHE INTERNAL ${msg})
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSE "${msg}... no")
+ message (VERBOSE "${msg}... no")
endif ()
file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
- "Test TEST_LFS_WORKS Run failed with the following exit code:\n ${TEST_LFS_WORKS_RUN}\n"
+ "Test TEST_LFS_WORKS Compile failed\n"
)
endif ()
- else ()
- set (TEST_LFS_WORKS "" CACHE INTERNAL ${msg})
- if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSE "${msg}... no")
- endif ()
- file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
- "Test TEST_LFS_WORKS Compile failed\n"
- )
endif ()
- endif ()
- set (CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} ${HDF_EXTRA_FLAGS})
+ set (CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} ${HDF_EXTRA_FLAGS})
endif ()
endif ()
diff --git a/config/cmake_ext_mod/HDFLibMacros.cmake b/config/cmake_ext_mod/HDFLibMacros.cmake
index 2af3229..40d06e7 100644
--- a/config/cmake_ext_mod/HDFLibMacros.cmake
+++ b/config/cmake_ext_mod/HDFLibMacros.cmake
@@ -139,11 +139,11 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
# add_subdirectory(${szip_SOURCE_DIR} ${szip_BINARY_DIR})
# endif()
#
-##include (${BINARY_DIR}/${SZ_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
+##include (${BINARY_DIR}/${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
# Create imported target szip-static
if (USE_LIBAEC)
add_library(${HDF_PACKAGE_NAMESPACE}sz-static STATIC IMPORTED)
- HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}sz-static "sz" STATIC "")
+ HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}sz-static "szaec" STATIC "")
add_dependencies (${HDF_PACKAGE_NAMESPACE}sz-static SZIP)
add_library(${HDF_PACKAGE_NAMESPACE}aec-static STATIC IMPORTED)
HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}aec-static "aec" STATIC "")
@@ -188,7 +188,7 @@ endmacro ()
#-------------------------------------------------------------------------------
macro (EXTERNAL_ZLIB_LIBRARY compress_type)
if (${compress_type} MATCHES "GIT")
- EXTERNALPROJECT_ADD (ZLIB
+ EXTERNALPROJECT_ADD (HDF5_ZLIB
GIT_REPOSITORY ${ZLIB_URL}
GIT_TAG ${ZLIB_BRANCH}
INSTALL_COMMAND ""
@@ -207,7 +207,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
-DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE}
)
elseif (${compress_type} MATCHES "TGZ")
- EXTERNALPROJECT_ADD (ZLIB
+ EXTERNALPROJECT_ADD (HDF5_ZLIB
URL ${ZLIB_URL}
URL_MD5 ""
INSTALL_COMMAND ""
@@ -226,19 +226,15 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
-DPACKAGE_NAMESPACE=${HDF_PACKAGE_NAMESPACE}
)
endif ()
- externalproject_get_property (ZLIB BINARY_DIR SOURCE_DIR)
+ externalproject_get_property (HDF5_ZLIB BINARY_DIR SOURCE_DIR)
- if (WIN32)
- set (ZLIB_LIB_NAME "zlib")
- else ()
- set (ZLIB_LIB_NAME "z")
- endif ()
+ set (ZLIB_LIB_NAME "z")
##include (${BINARY_DIR}/${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
# Create imported target zlib-static
add_library(${HDF_PACKAGE_NAMESPACE}zlib-static STATIC IMPORTED)
# add_library(${HDF_PACKAGE_NAMESPACE}zlib-static ALIAS zlib-static)
HDF_IMPORT_SET_LIB_OPTIONS (${HDF_PACKAGE_NAMESPACE}zlib-static ${ZLIB_LIB_NAME} STATIC "")
- add_dependencies (${HDF_PACKAGE_NAMESPACE}zlib-static ZLIB)
+ add_dependencies (${HDF_PACKAGE_NAMESPACE}zlib-static HDF5_ZLIB)
set (ZLIB_STATIC_LIBRARY "${HDF_PACKAGE_NAMESPACE}zlib-static")
set (ZLIB_LIBRARIES ${ZLIB_STATIC_LIBRARY})
@@ -256,6 +252,6 @@ macro (PACKAGE_ZLIB_LIBRARY compress_type)
)
set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/zconf.h)
if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "TGZ")
- add_dependencies (ZLIB-GenHeader-Copy ZLIB)
+ add_dependencies (ZLIB-GenHeader-Copy HDF5_ZLIB)
endif ()
endmacro ()
diff --git a/config/cmake_ext_mod/HDFMacros.cmake b/config/cmake_ext_mod/HDFMacros.cmake
index 6da2b74..0f9b367 100644
--- a/config/cmake_ext_mod/HDFMacros.cmake
+++ b/config/cmake_ext_mod/HDFMacros.cmake
@@ -223,7 +223,7 @@ macro (TARGET_C_PROPERTIES wintarget libtype)
endmacro ()
#-----------------------------------------------------------------------------
-# Configure the README.txt file for the binary package
+# Configure the README.md file for the binary package
#-----------------------------------------------------------------------------
macro (HDF_README_PROPERTIES target_fortran)
set (BINARY_SYSTEM_NAME ${CMAKE_SYSTEM_NAME})
@@ -301,8 +301,8 @@ macro (HDF_README_PROPERTIES target_fortran)
endif ()
configure_file (
- ${HDF_RESOURCES_DIR}/README.txt.cmake.in
- ${CMAKE_BINARY_DIR}/README.txt @ONLY
+ ${HDF_RESOURCES_DIR}/README.md.cmake.in
+ ${CMAKE_BINARY_DIR}/README.md @ONLY
)
endmacro ()
@@ -356,7 +356,7 @@ macro (HDF_DIR_PATHS package_prefix)
endif ()
endif ()
if (NOT ${package_prefix}_INSTALL_CMAKE_DIR)
- set (${package_prefix}_INSTALL_CMAKE_DIR share/cmake)
+ set (${package_prefix}_INSTALL_CMAKE_DIR cmake)
endif ()
# Always use full RPATH, i.e. don't skip the full RPATH for the build tree
@@ -427,7 +427,7 @@ macro (HDF_DIR_PATHS package_prefix)
endif ()
endif ()
- if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+ if (NOT ${package_prefix}_EXTERNALLY_CONFIGURED AND CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
if (CMAKE_HOST_UNIX)
set (CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}/HDF_Group/${HDF5_PACKAGE_NAME}/${HDF5_PACKAGE_VERSION}"
CACHE PATH "Install path prefix, prepended onto install directories." FORCE)
diff --git a/config/cmake_ext_mod/HDFTests.c b/config/cmake_ext_mod/HDFTests.c
index c434be5..2b2a202 100644
--- a/config/cmake_ext_mod/HDFTests.c
+++ b/config/cmake_ext_mod/HDFTests.c
@@ -115,8 +115,8 @@ int main(void)
for (currentArg = llwidthArgs; *currentArg != NULL; currentArg++)
{
char formatString[64];
- sprintf(formatString, "%%%sd", *currentArg);
- sprintf(s, formatString, x);
+ snprintf(formatString, sizeof(formatString), "%%%sd", *currentArg);
+ snprintf(s, 128, formatString, x);
if (strcmp(s, "1099511627776") == 0)
{
printf("PRINTF_LL_WIDTH=[%s]\n", *currentArg);
diff --git a/config/cmake_ext_mod/runTest.cmake b/config/cmake_ext_mod/runTest.cmake
index f552dcd..b373fe4 100644
--- a/config/cmake_ext_mod/runTest.cmake
+++ b/config/cmake_ext_mod/runTest.cmake
@@ -128,7 +128,7 @@ message (STATUS "COMMAND Error: ${TEST_ERROR}")
# remove special output
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
-string (FIND "${TEST_STREAM}" "_pmi_alps" TEST_FIND_RESULT)
+string (FIND TEST_STREAM "_pmi_alps" TEST_FIND_RESULT)
if (TEST_FIND_RESULT GREATER -1)
string (REGEX REPLACE "^.*_pmi_alps[^\n]+\n" "" TEST_STREAM "${TEST_STREAM}")
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} ${TEST_STREAM})
@@ -142,7 +142,7 @@ else ()
# the error stack remains in the .err file
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
endif ()
-string (FIND "${TEST_STREAM}" "no version information available" TEST_FIND_RESULT)
+string (FIND TEST_STREAM "no version information available" TEST_FIND_RESULT)
if (TEST_FIND_RESULT GREATER -1)
string (REGEX REPLACE "^.*no version information available[^\n]+\n" "" TEST_STREAM "${TEST_STREAM}")
# write back the changes to the original files
@@ -233,7 +233,7 @@ if (NOT TEST_SKIP_COMPARE)
list (SORT v1)
list (SORT v2)
if (NOT v1 STREQUAL v2)
- set(TEST_COMPARE_RESULT 1)
+ set (TEST_COMPARE_RESULT 1)
endif ()
endif ()
@@ -243,7 +243,14 @@ if (NOT TEST_SKIP_COMPARE)
list (LENGTH test_act len_act)
file (STRINGS ${TEST_FOLDER}/${TEST_REFERENCE} test_ref)
list (LENGTH test_ref len_ref)
+ if (NOT len_act EQUAL len_ref)
+ set (TEST_COMPARE_RESULT 1)
+ endif ()
if (len_act GREATER 0 AND len_ref GREATER 0)
+ if (TEST_SORT_COMPARE)
+ list (SORT test_act)
+ list (SORT test_ref)
+ endif ()
math (EXPR _FP_LEN "${len_ref} - 1")
foreach (line RANGE 0 ${_FP_LEN})
list (GET test_act ${line} str_act)
@@ -263,9 +270,6 @@ if (NOT TEST_SKIP_COMPARE)
message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_REFERENCE} is empty")
endif ()
endif ()
- if (NOT len_act EQUAL len_ref)
- set (TEST_COMPARE_RESULT 1)
- endif ()
endif ()
endif ()
diff --git a/config/gnu-warnings/cxx-general b/config/gnu-warnings/cxx-general
index 9548cc0..1626524 100644
--- a/config/gnu-warnings/cxx-general
+++ b/config/gnu-warnings/cxx-general
@@ -17,7 +17,6 @@
-Winit-self
-Winvalid-pch
-Wmissing-include-dirs
--Wno-format-nonliteral
-Wnon-virtual-dtor
-Wold-style-cast
-Woverloaded-virtual
diff --git a/config/gnu-warnings/developer-general b/config/gnu-warnings/developer-general
index b34c4b7..460b874 100644
--- a/config/gnu-warnings/developer-general
+++ b/config/gnu-warnings/developer-general
@@ -1,9 +1,6 @@
# (suggestions from gcc, not code problems)
-# NOTE: -Wformat-nonliteral added back in here (from being disabled in
-# H5_CFLAGS)
-Waggregate-return
-Wdisabled-optimization
--Wformat-nonliteral
-Winline
-Wmissing-format-attribute
-Wmissing-noreturn
diff --git a/config/gnu-warnings/general b/config/gnu-warnings/general
index a7a20b7..df4c613 100644
--- a/config/gnu-warnings/general
+++ b/config/gnu-warnings/general
@@ -15,18 +15,12 @@
-Winit-self
-Winvalid-pch
-Wmissing-include-dirs
-#
+-Wshadow
+-Wundef
+-Wwrite-strings
+-pedantic
# NOTE: Due to the divergence in the C and C++, we're dropping support for
# compiling the C library with a C++ compiler and dropping the -Wc++-compat
# warning.
#
-Wno-c++-compat
-#
-# NOTE: Disable the -Wformat-nonliteral from -Wformat=2 here and re-add
-# it to the developer flags.
-#
--Wno-format-nonliteral
--Wshadow
--Wundef
--Wwrite-strings
--pedantic
diff --git a/config/intel-warnings/ifort-general b/config/intel-warnings/ifort-general
index a9da0e5..1644c7c 100644
--- a/config/intel-warnings/ifort-general
+++ b/config/intel-warnings/ifort-general
@@ -1 +1 @@
--warn:all
+-warn all
diff --git a/config/intel-warnings/win-ifort-general b/config/intel-warnings/win-ifort-general
new file mode 100644
index 0000000..a335959
--- /dev/null
+++ b/config/intel-warnings/win-ifort-general
@@ -0,0 +1 @@
+/warn:all
diff --git a/config/sanitizer/code-coverage.cmake b/config/sanitizer/code-coverage.cmake
index c79aeac..e71bfd7 100644
--- a/config/sanitizer/code-coverage.cmake
+++ b/config/sanitizer/code-coverage.cmake
@@ -80,10 +80,17 @@ option(
OFF)
# Programs
-find_program(LLVM_COV_PATH llvm-cov)
-find_program(LLVM_PROFDATA_PATH llvm-profdata)
-find_program(LCOV_PATH lcov)
-find_program(GENHTML_PATH genhtml)
+if(WIN32)
+ find_program(LLVM_COV_PATH llvm-cov PATHS ENV VS2019INSTALLDIR PATH_SUFFIXES "VC/Tools/Llvm/x64/bin")
+ find_program(LLVM_PROFDATA_PATH llvm-profdata PATHS ENV VS2019INSTALLDIR PATH_SUFFIXES "VC/Tools/Llvm/x64/bin")
+ find_program(LCOV_PATH lcov PATHS ENV VS2019INSTALLDIR PATH_SUFFIXES "VC/Tools/Llvm/x64/bin")
+ find_program(GENHTML_PATH genhtml PATHS ENV VS2019INSTALLDIR PATH_SUFFIXES "VC/Tools/Llvm/x64/bin")
+else()
+ find_program(LLVM_COV_PATH llvm-cov)
+ find_program(LLVM_PROFDATA_PATH llvm-profdata)
+ find_program(LCOV_PATH lcov)
+ find_program(GENHTML_PATH genhtml)
+endif()
# Variables
set(CMAKE_COVERAGE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/ccov)
diff --git a/config/sanitizer/sanitizers.cmake b/config/sanitizer/sanitizers.cmake
index 8bf1d5b..b06992f 100644
--- a/config/sanitizer/sanitizers.cmake
+++ b/config/sanitizer/sanitizers.cmake
@@ -83,12 +83,21 @@ if(USE_SANITIZER)
elseif(MSVC)
if(USE_SANITIZER MATCHES "([Aa]ddress)")
message(STATUS "Building with Address sanitizer")
- append("/fsanitize=address" CMAKE_C_SANITIZER_FLAGS CMAKE_CXX_SANITIZER_FLAGS)
+ append("-fsanitize=address" CMAKE_C_SANITIZER_FLAGS CMAKE_CXX_SANITIZER_FLAGS)
else()
message(FATAL_ERROR "This sanitizer not yet supported in the MSVC environment: ${USE_SANITIZER}")
endif()
else()
message(FATAL_ERROR "USE_SANITIZER is not supported on this platform.")
endif()
+ elseif(MSVC)
+ if(USE_SANITIZER MATCHES "([Aa]ddress)")
+ message(STATUS "Building with Address sanitizer")
+ append("/fsanitize=address" CMAKE_C_SANITIZER_FLAGS CMAKE_CXX_SANITIZER_FLAGS)
+ else()
+ message(FATAL_ERROR "This sanitizer not yet supported in the MSVC environment: ${USE_SANITIZER}")
+ endif()
+ else()
+ message(FATAL_ERROR "USE_SANITIZER is not supported on this platform.")
endif()
endif()
diff --git a/configure.ac b/configure.ac
index 2520249..d5cef35 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@ AC_PREREQ([2.69])
## NOTE: Do not forget to change the version number here when we do a
## release!!!
##
-AC_INIT([HDF5], [1.13.1-1], [help@hdfgroup.org])
+AC_INIT([HDF5], [1.13.2-1], [help@hdfgroup.org])
AC_CONFIG_SRCDIR([src/H5.c])
AC_CONFIG_HEADERS([src/H5config.h])
@@ -231,9 +231,10 @@ AC_CACHE_CHECK([for cached host], [hdf5_cv_host], [hdf5_cv_host="none"]);
if test $hdf5_cv_host = "none"; then
hdf5_cv_host=$host
elif test $hdf5_cv_host != $host; then
- echo "The config.cache file was generated on $hdf5_cv_host but"
- echo "this is $host. Please remove that file and try again."
- AC_MSG_ERROR([config.cache file is invalid])
+ AC_MSG_ERROR([
+ The config.cache file was generated on $hdf5_cv_host but
+ this is $host. Please remove that file and try again.
+ config.cache file is invalid])
fi
## ----------------------------------------------------------------------
@@ -567,11 +568,7 @@ AC_ARG_ENABLE([fortran],
[Compile the Fortran interface [default=no]])],
[HDF_FORTRAN=$enableval])
-if test "X$HDF_FORTRAN" = "Xyes"; then
- echo "yes"
-else
- echo "no"
-fi
+AC_MSG_RESULT([$HDF_FORTRAN])
if test "X$HDF_FORTRAN" = "Xyes"; then
@@ -794,9 +791,9 @@ AC_ARG_ENABLE([cxx],
[AS_HELP_STRING([--enable-cxx],
[Compile the C++ interface [default=no]])],
[HDF_CXX=$enableval])
+AC_MSG_RESULT([$HDF_CXX])
if test "X$HDF_CXX" = "Xyes"; then
- echo "yes"
HDF5_INTERFACES="$HDF5_INTERFACES c++"
## Expose the compiler for *.in files
@@ -806,7 +803,6 @@ if test "X$HDF_CXX" = "Xyes"; then
AC_LANG_PUSH(C++)
else
- AC_MSG_RESULT([no])
CXX="no"
fi
@@ -1022,7 +1018,7 @@ AC_ARG_ENABLE([java],
if test "X$HDF_JAVA" = "Xyes"; then
if test "X${enable_shared}" != "Xno"; then
- echo "yes"
+ AC_MSG_RESULT([yes])
if test "X$CLASSPATH" = "X"; then
H5_CLASSPATH=".:$srcdir/java/lib"
else
@@ -1115,12 +1111,7 @@ AC_ARG_ENABLE([tests],
[Compile the HDF5 tests [default=yes]])],
[HDF5_TESTS=$enableval])
-if test "X$HDF5_TESTS" = "Xno"; then
- AC_MSG_RESULT([yes])
- echo "Building HDF5 tests is disabled"
-else
- AC_MSG_RESULT([no])
-fi
+AC_MSG_RESULT([$HDF5_TESTS])
## These need to be exposed for some tests.
AC_SUBST([H5_UTILS_TEST_BUILDDIR])
@@ -1145,12 +1136,7 @@ AC_ARG_ENABLE([tools],
[Compile the HDF5 tools [default=yes]])],
[HDF5_TOOLS=$enableval])
-if test "X$HDF5_TOOLS" = "Xno"; then
- AC_MSG_RESULT([yes])
- echo "Building HDF5 tools is disabled"
-else
- AC_MSG_RESULT([no])
-fi
+AC_MSG_RESULT([$HDF5_TOOLS])
## ----------------------------------------------------------------------
## Check if they would like to enable building doxygen files
@@ -1169,8 +1155,9 @@ AC_ARG_ENABLE([doxygen],
[Compile the HDF5 doxygen files [default=no]])],
[HDF5_DOXYGEN=$enableval])
+AC_MSG_RESULT([$HDF5_DOXYGEN])
+
if test "X$HDF5_DOXYGEN" = "Xyes"; then
- AC_MSG_RESULT([yes])
DX_DOXYGEN_FEATURE(ON)
DX_DOT_FEATURE(OFF)
DX_HTML_FEATURE(ON)
@@ -1230,10 +1217,6 @@ if test "X$HDF5_DOXYGEN" = "Xyes"; then
DOXYGEN_PREDEFINED='H5_HAVE_DIRECT H5_HAVE_LIBHDFS H5_HAVE_MAP_API H5_HAVE_PARALLEL H5_HAVE_ROS3_VFD'
DX_INIT_DOXYGEN([HDF5], [./doxygen/Doxyfile], [hdf5lib_docs])
-
-else
- AC_MSG_RESULT([no])
- echo "Doxygen support is disabled"
fi
@@ -1263,18 +1246,17 @@ AC_ARG_ENABLE([static_exec],
[Install only statically linked executables
[default=no]])],
[STATIC_EXEC=$enableval])
+AC_MSG_RESULT([$STATIC_EXEC])
if test "X$STATIC_EXEC" = "Xyes"; then
- echo "yes"
## Issue a warning if -static flag is not supported.
if test "X$lt_cv_prog_compiler_static_works" = "Xno"; then
- echo " warning: -static flag not supported on this system; executable won't statically link shared system libraries."
+ AC_MSG_WARN([-static flag not supported on this system; executable won't statically link shared system libraries.])
LT_STATIC_EXEC=""
else
LT_STATIC_EXEC="-all-static"
fi
else
- echo "no"
LT_STATIC_EXEC=""
fi
AM_CONDITIONAL([USE_PLUGINS_CONDITIONAL], [test "X$LT_STATIC_EXEC" = X])
@@ -2154,7 +2136,7 @@ for hdf5_cv_printf_ll in ll l L q unknown; do
],[[
char *s = malloc(128);
long long x = (long long)1048576 * (long long)1048576;
- sprintf(s,"%${hdf5_cv_printf_ll}d",x);
+ snprintf(s,128,"%${hdf5_cv_printf_ll}d",x);
exit(strcmp(s,"1099511627776"));
]])]
, [break],,[continue])
@@ -2710,9 +2692,11 @@ AC_ARG_ENABLE([memory-alloc-sanity-check],
[Enable this option to turn on internal memory
allocation sanity checking. This could cause
more memory use and somewhat slower allocation.
- This option is orthogonal to the
- --enable-using-memchecker option.
- [default=yes if debug build, otherwise no]
+ This option may also cause issues with HDF5
+ filter plugins, so should not be enabled if
+ filters are to be used. This option is orthogonal
+ to the --enable-using-memchecker option.
+ [default=no]
])],
[MEMORYALLOCSANITYCHECK=$enableval])
@@ -2722,11 +2706,10 @@ AC_SUBST([MEMORYALLOCSANITYCHECK])
## Set default
if test "X-$MEMORYALLOCSANITYCHECK" = X- ; then
- if test "X-$BUILD_MODE" = "X-debug" ; then
- MEMORYALLOCSANITYCHECK=yes
- else
- MEMORYALLOCSANITYCHECK=no
- fi
+# Should consider enabling this option by default for
+# 'developer' builds if that build mode is added in
+# the future
+ MEMORYALLOCSANITYCHECK=no
fi
case "X-$MEMORYALLOCSANITYCHECK" in
@@ -2987,11 +2970,11 @@ if test -n "$PARALLEL"; then
fi
## ----------------------------------------------------------------------
- ## Check for the MPI-3 functions necessary for the Parallel Compression
+ ## Check for the MPI functions necessary for the Parallel Compression
## feature. If these are not present, issue a warning that Parallel
## Compression will be disabled.
##
- AC_MSG_CHECKING([for MPI_Mprobe and MPI_Imrecv functions])
+ AC_MSG_CHECKING([for MPI_Ibarrier/MPI_Issend/MPI_Iprobe/MPI_Irecv functions])
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
@@ -2999,16 +2982,19 @@ if test -n "$PARALLEL"; then
#include <mpi.h>
]],
[[
- MPI_Message message;
+ int flag;
MPI_Init(0, (void *) 0);
- MPI_Mprobe(0, 0, 0, &message, (void *) 0);
- MPI_Imrecv((void *) 0, 0, 0, (void *) 0, (void *) 0);
+ MPI_Ibarrier(0, (void *) 0);
+ MPI_Issend((void *) 0, 0, 0, 0, 0, 0, (void *) 0);
+ MPI_Iprobe(0, 0, 0, &flag, (void *) 0);
+ MPI_Irecv((void *) 0, 0, 0, 0, 0, 0, (void *) 0);
]]
)],
[AC_MSG_RESULT([yes])
- PARALLEL_FILTERED_WRITES=yes],
+ PARALLEL_FILTERED_WRITES=yes
+ AC_DEFINE([HAVE_PARALLEL_FILTERED_WRITES], [1], [Define if we have support for writing to filtered datasets in parallel])],
[AC_MSG_RESULT([no])
- AC_MSG_WARN([A simple MPI program using the MPI_Mprobe and MPI_Imrecv functions could not be compiled and linked.
+ AC_MSG_WARN([A simple MPI program using the MPI_Ibarrier, MPI_Issend, MPI_Iprobe and MPI_Irecv functions could not be compiled and linked.
Parallel writes of filtered data will be disabled.])
PARALLEL_FILTERED_WRITES=no]
)
@@ -3675,8 +3661,8 @@ fi
## and installed with the libraries (used to generate libhdf5.settings).
##
-## HDF5 version from the first line of the README.txt file.
-H5_VERSION="`cut -d' ' -f3 $srcdir/README.txt | head -1`"
+## HDF5 version from the first line of the README.md file.
+H5_VERSION="`cut -d' ' -f3 $srcdir/README.md | head -1`"
AC_SUBST([H5_VERSION])
## Configuration date
@@ -3820,12 +3806,7 @@ AC_ARG_ENABLE([build-all],
[Build helper programs that only developers should need [default=no]])],
[BUILD_ALL=$enableval],
[BUILD_ALL=no])
-
-if test "X$BUILD_ALL" = "Xyes"; then
- echo "yes"
-else
- echo "no"
-fi
+AC_MSG_RESULT([$BUILD_ALL])
AM_CONDITIONAL([BUILD_ALL_CONDITIONAL], [test "X$BUILD_ALL" = "Xyes"])
## ----------------------------------------------------------------------
@@ -4161,20 +4142,19 @@ AC_CONFIG_FILES([src/libhdf5.settings
src/Makefile
test/Makefile
test/H5srcdir_str.h
- test/testabort_fail.sh
- test/testcheck_version.sh
- test/testerror.sh
- test/testexternal_env.sh
- test/testflushrefresh.sh
- test/testlibinfo.sh
- test/testlinks_env.sh
- test/testswmr.sh
- test/testvds_env.sh
- test/testvdsswmr.sh
- test/test_filter_plugin.sh
+ test/test_abort_fail.sh
+ test/test_check_version.sh
+ test/test_error.sh
+ test/test_external_env.sh
+ test/test_flush_refresh.sh
+ test/test_libinfo.sh
+ test/test_links_env.sh
test/test_mirror.sh
- test/test_usecases.sh
- test/test_vol_plugin.sh
+ test/test_plugin.sh
+ test/test_swmr.sh
+ test/test_use_cases.sh
+ test/test_vds_env.sh
+ test/test_vds_swmr.sh
testpar/Makefile
testpar/testpflush.sh
utils/Makefile
diff --git a/doc/img/release-schedule.plantuml b/doc/img/release-schedule.plantuml
new file mode 100644
index 0000000..f5aa62a
--- /dev/null
+++ b/doc/img/release-schedule.plantuml
@@ -0,0 +1,45 @@
+The release timeline was generated on PlantUML (https://plantuml.com)
+
+The current script:
+
+@startuml
+title HDF5 Release Schedule
+
+projectscale monthly
+Project starts 2021-01-01
+
+[1.8] starts 2021-01-01 and lasts 114 weeks
+[1.8.22] happens 2021-02-05
+[1.8.23] happens 2022-12-31
+[1.8.23] displays on same row as [1.8.22]
+[1.8] is colored in #CC6677
+
+[1.10] starts 2021-01-01 and lasts 114 weeks
+[1.10.8] happens 2021-10-22
+[1.10.9] happens 2022-05-31
+[1.10.9] displays on same row as [1.10.8]
+[1.10] is colored in #DDCC77
+
+[1.12] starts 2021-01-01 and lasts 114 weeks
+[1.12.1] happens 2021-07-01
+[1.12.2] happens 2022-04-30
+[1.12.2] displays on same row as [1.12.1]
+[1.12] is colored in #88CCEE
+
+[1.13] starts 2021-01-01 and lasts 104 weeks
+[1.13.0] happens 2021-12-01
+[1.13.1] happens 2022-03-02
+[1.13.2] happens 2022-06-30
+[1.13.3] happens 2022-08-31
+[1.13.4] happens 2022-10-31
+[1.13.1] displays on same row as [1.13.0]
+[1.13.2] displays on same row as [1.13.0]
+[1.13.3] displays on same row as [1.13.0]
+[1.13.4] displays on same row as [1.13.0]
+[1.13] is colored in #44AA99
+
+[1.14] starts 2022-12-31 and lasts 10 weeks
+[1.14.0] happens 2022-12-31
+[1.14] is colored in #AA4499
+@enduml
+
diff --git a/doc/img/release-schedule.png b/doc/img/release-schedule.png
new file mode 100755
index 0000000..c2ed241
--- /dev/null
+++ b/doc/img/release-schedule.png
Binary files differ
diff --git a/doxygen/aliases b/doxygen/aliases
index f83a875..11fa691 100644
--- a/doxygen/aliases
+++ b/doxygen/aliases
@@ -299,7 +299,7 @@ ALIASES += ref_rfc20130630="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/Design-
ALIASES += ref_rfc20130316="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/HDF5DynamicallyLoadedFilters.pdf\">HDF5 Dynamically Loaded Filters</a>"
ALIASES += ref_rfc20121114="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/DECTRIS%20Integration%20RFC%202012-11-29.pdf\">Direct Chunk Write</a>"
ALIASES += ref_rfc20121024="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/FileSpaceManagement.pdf\">HDF5 File Space Management</a>"
-ALIASES += ref_rfc20120828="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/H5HPC_MultiDset_RW_IO_RFC_v4_20130320.docx.pdf\">New HDF5 API Routines for HPC Applications</a>"
+ALIASES += ref_rfc20120828="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/H5HPC_MultiDset_RW_IO_RFC_v6_20220124.pdf\">New HDF5 API Routines for HPC Applications - Read/Write Multiple Datasets in an HDF5 file</a>"
ALIASES += ref_rfc20120523="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/paged_aggregation.pdf\">HDF5 File Space Management: Paged Aggregation</a>"
ALIASES += ref_rfc20120501="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/HDF5FileImageOperations.pdf\">HDF5 File Image Operations</a>"
ALIASES += ref_rfc20120305="<a href=\"https://docs.hdfgroup.org/hdf5/rfc/RFC%20PHDF5%20Consistency%20Semantics%20MC%20120328.docx.pdf\">Enabling a Strict Consistency Semantics Model in Parallel HDF5</a>"
diff --git a/doxygen/dox/About.dox b/doxygen/dox/About.dox
index 0b21fcc..a8b31d7 100644
--- a/doxygen/dox/About.dox
+++ b/doxygen/dox/About.dox
@@ -124,6 +124,4 @@ version.
Talk to your friendly IT-team if you need write access, or you need someone to
push an updated version for you!
-\todo Make the publication a GitHub action!
-
*/ \ No newline at end of file
diff --git a/doxygen/dox/Glossary.dox b/doxygen/dox/Glossary.dox
new file mode 100644
index 0000000..9ccd27d
--- /dev/null
+++ b/doxygen/dox/Glossary.dox
@@ -0,0 +1,565 @@
+/** \page GLS Glossary
+
+\section GLS_A A
+
+<DL>
+ <DT>Array datatype</DT>
+ <DD>A family of HDF5 datatypes whose elements are arrays of a fixed rank (≤
+ 32) and fixed finite extent. All array elements must be of the same HDF5
+ datatype.</DD>
+</DL>
+
+<DL>
+ <DT>Array variable</DT>
+ <DD><P>A variable that can store (logically) dense, rectilinear, multidimensional
+ arrays of elements of a given HDF5 datatype.</P>
+ <P>The combination of array rank (dimensionality) and extent is called an
+ array variable's shape. This includes the degenerate array shapes of a
+ singleton (scalar) and the empty array (null).</P>
+ <P>The array element datatype is sometimes referred to as the array
+ variable's type, which is not entirely accurate because the array variable's
+ type is 'array of element type' rather than 'element type'.</P>
+ <P>In HDF5, there are two kinds of array variables, attributes and datasets,
+ and the distinction is functional (i.e., how they can be used) rather than
+ conceptual. Attributes are commonly used for descriptive "light-weight"
+ HDF5 object metadata while datasets are HDF5 objects used to store
+ "heavy-weight" problem-sized data.</P>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Attribute</DT>
+ <DD><P>A named array variable that is associated with an HDF5 object, its
+ owner or attributee, and used to represent application domain-specific
+ metadata of the object. Intuitively, the set of an object's attributes can
+ be thought of as its key-value pair collection. Attribute names (keys) can
+ be arbitrary Unicode strings, but must be unique per object, i.e., an
+ object can have at most one attribute with a given name.</P>
+ <P>A scalar attribute is an attribute backed by a singleton array
+ variable. A null attribute is an attribute backed by an empty array
+ variable.</P>
+</DD>
+</DL>
+
+\section GLS_B B
+
+<DL>
+ <DT>Bitfield datatype</DT>
+ <DD>A family of HDF5 datatypes whose elements are fixed-width bit fields.</DD>
+</DL>
+
+\section GLS_C C
+
+<DL>
+ <DT>Chunked layout</DT>
+ <DD>
+ <P>A dataset storage layout where the dataset elements are partitioned into
+ fixed-size multidimensional chunks or tiles. Chunked layout is mandatory
+ for datasets with one or more dimensions of indefinite (infinite) extent
+ or where compression or other filters are applied to the dataset elements.</P>
+ <P>Chunked layout may improve I/O performance for certain access patterns.</P>
+</DD>
+</DL>
+
+<DL>
+ <DT>Committed datatype</DT>
+ <DD>An immutable kind of HDF5 object that is used to store an HDF5 datatype
+ definition, which can be referenced by multiple array variables. When linked
+ to an HDF5 group, a committed datatype can be located by an HDF5 path name,
+ and is sometimes called a named datatype.</DD>
+</DL>
+
+<DL>
+ <DT>Compact layout</DT>
+ <DD></DD>
+</DL>
+
+<DL>
+ <DT>Compound datatype</DT>
+ <DD>
+ <P>A family of HDF5 datatypes whose elements are records with named fields
+ of other HDF5 datatypes. Currently, only ASCII field names are supported.</P>
+ <P>Similar to a <CODE>struct</CODE> in C or a <CODE>COMMON</CODE> block in
+ Fortran.</P>
+</DD>
+</DL>
+
+<DL>
+ <DT>Contiguous layout</DT>
+ <DD>A dataset storage layout where the dataset elements are physically stored
+ in an HDF5 file as a contiguous block of bytes.</DD>
+</DL>
+
+\section GLS_D D
+
+<DL>
+ <DT>Dataset</DT>
+ <DD>
+ <P>A kind of HDF5 object, a linked array variable, which can be located in
+ an HDF5 file through a path name. Datasets are commonly used to store
+ "heavy-weight" problem-sized data.</P>
+ <P>The HDF5 library offers a lot of features aimed at optimized dataset
+ access and storage, including compression and partial I/O.</P>
+</DD>
+</DL>
+
+<DL>
+ <DT>Dataspace</DT>
+ <DD>The shape of an array variable. With the exception of degenerate cases
+ (empty set, singleton), this is a rectilinear lattice or grid of a certain
+ rank (dimensionality) and extent.</DD>
+</DL>
+
+<DL>
+ <DT>Datatype</DT>
+ <DD>
+ <P>An HDF5 datatype consists of an abstract data type (a set of elements)
+ and a bit-level representation of these elements in storage such as an HDF5
+ file or memory.</P>
+ <P>The HDF5 library comes with a large set of predefined datatypes and
+ offers mechanisms for creating user-defined datatypes.</P>
+ <P>The ten major families or classes of HDF5 datatypes are:</P>
+ <UL>
+ <LI>Integer datatypes</LI>
+ <LI>Floating-point number datatypes</LI>
+ <LI>String datatypes</LI>
+ <LI>Bitfield datatypes</LI>
+ <LI>Opaque datatypes</LI>
+ <LI>Compound datatypes</LI>
+ <LI>Reference datatypes</LI>
+ <LI>Enumerated datatypes</LI>
+ <LI>Variable-length sequence datatypes</LI>
+ <LI>Array datatypes</LI>
+ </UL>
+</DD>
+</DL>
+
+\section GLS_E E
+
+<DL>
+ <DT>Enumeration datatype</DT>
+ <DD>A family of HDF5 datatypes whose elements represent named integer values
+ called members or enumerators. Currently, only ASCII names are supported.</DD>
+</DL>
+
+<DL>
+ <DT>External layout</DT>
+ <DD>A form of contiguous layout where a dataset's elements are physically
+ stored in unformatted binary files outside the HDF5 file.</DD>
+</DL>
+
+<DL>
+ <DT>External link</DT>
+ <DD>An HDF5 link whose destination is specified as a pair of an HDF5 file name
+and an HDF5 path name in that file.</DD>
+</DL>
+
+\section GLS_F F
+
+<DL>
+ <DT>Field</DT>
+ <DD>See compound datatype.</DD>
+</DL>
+
+<DL>
+ <DT>File</DT>
+ <DD>
+ <OL>
+ <LI>A byte stream (in a storage context such as a file system or in
+ memory) formatted according to the HDF5 File Format Specification.</LI>
+ <LI>A (logical) container for HDF5 objects.</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>File format</DT>
+ <DD></DD>
+</DL>
+
+<DL>
+ <DT>Fill value</DT>
+ <DD></DD>
+</DL>
+
+<DL>
+ <DT>Filter</DT>
+ <DD></DD>
+</DL>
+
+\section GLS_G G
+
+<DL>
+ <DT>Group</DT>
+ <DD>
+ <P>A kind of HDF5 object that stores a collection of HDF5 links. Each HDF5
+ file contains at least one group, its root group.</P>
+ <P>Among the destinations of an HDF5 group's links may be other HDF5 groups
+ (including the group itself!). This ability is sometimes referred to as the
+ closure property of groups. It is the basis for creating hierarchical or
+ more general graph-like structures.</P>
+</DD>
+</DL>
+
+\section GLS_H H
+
+<DL>
+ <DT>Hard link</DT>
+ <DD>An HDF5 link whose destination is specified (internally) as the address of
+ an HDF5 object in the same HDF5 file.</DD>
+</DL>
+
+<DL>
+ <DT>Hierarchy</DT>
+ <DD>See group.</DD>
+</DL>
+
+<DL>
+ <DT>Hyperslab</DT>
+ <DD>
+ <P>A regular multidimensional pattern described by four vectors whose length
+ equals the rank of the pattern.</P>
+ <OL>
+ <LI><CODE>start</CODE> - the offset where the first block of the hyperslab begins</LI>
+ <LI><CODE>stride</CODE> - the offset between pattern blocks</LI>
+ <LI><CODE>count</CODE> - the number of blocks</LI>
+ <LI><CODE>block</CODE> - the extent of an individual pattern block</LI>
+ </OL>
+ <P>For example, the black squares on a (two-dimensional) chessboard with
+ origin at <CODE>(0,0)</CODE> can be represented as the union of two
+ hyperslabs representing the even <CODE>(0,2,4,6)</CODE> and
+ odd <CODE>(1,3,5,7)</CODE> rows, respectively.</P>
+ <IMG SRC="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d7/Chessboard480.svg/176px-Chessboard480.svg.png"/>
+ <P>The hyperslab parameters for the even rows are: <CODE>start (0,0)</CODE>,
+ <CODE>stride (2,2)</CODE>, <CODE>count (4,4)</CODE>, <CODE>block
+ (1,1)</CODE>. Likewise the parameters for the odd rows are: <CODE>start
+ (1,1)</CODE>, <CODE>stride (2,2)</CODE>, <CODE>count
+ (4,4)</CODE>, <CODE>block (1,1)</CODE>.</P>
+</DD>
+</DL>
+
+\section GLS_I I
+
+<DL>
+ <DT>Identifier</DT>
+ <DD>An opaque, transient handle used by the HDF5 library to manipulate
+ in-memory representations of HDF5 items.</DD>
+</DL>
+
+\section GLS_L L
+
+<DL>
+ <DT>Library</DT>
+ <DD></DD>
+</DL>
+
+<DL>
+ <DT>Link</DT>
+ <DD>
+ <P>A named, uni-directional association between a source and a
+ destination. In HDF5, the source is always the HDF5 group that hosts the
+ link in its link collection.</P>
+ <P>There are several ways to specify a link's destination:</P>
+ <UL>
+ <LI>The address of an HDF5 object in the same HDF5 file; so-called hard
+ link.</LI>
+ <LI>A path name in the same or a different file; so-called soft or
+ external link.</LI>
+ <LI>User-defined</LI>
+ </UL>
+ <P>A link name can be any Unicode string that does not contain slashes
+ (<CODE>"/"</CODE>) or consists of a single dot character
+ (<CODE>"."</CODE>). A link name must be unique in a group's link
+ collection.</P>
+ </DD>
+</DL>
+
+\section GLS_M M
+
+<DL>
+ <DT>Metadata</DT>
+ <DD>Data that in a given context has a descriptive or documentation function
+ for other data. Typically, the metadata is small compared to the data it
+ describes.</DD>
+</DL>
+
+<DL>
+ <DT>Member</DT>
+ <DD>
+ <P>A link destination is sometimes referred to as a member of the link's
+ source (group). This way of speaking invites confusion: A destination (e.g.,
+ object) can be the destination of multiple links in the same (!) or
+ different groups. It would then be a "member" of a given group with
+ multiplicity greater than one and be a member of multiple groups.</P>
+ <P> It is the link that is a member of the group's link collection and not
+ the link destination.</P>
+ </DD>
+</DL>
+
+\section GLS_N N
+
+<DL>
+ <DT>Name</DT>
+ <DD>
+ <P>A Unicode string that depending on the item it names might be subject to
+ certain character restrictions, such as ASCII-encoded only. In HDF5, the
+ user might encounter the following names:</P>
+ <UL>
+ <LI>A link name</LI>
+ <LI>A path name</LI>
+ <LI>An attribute name</LI>
+ <LI>A field name (compound datatypes)</LI>
+ <LI>A constant name (enumeration datatypes)</LI>
+ <LI>A tag name (opaque datatypes)</LI>
+ <LI>A file name</LI>
+ </UL>
+ </DD>
+</DL>
+
+
+<DL>
+ <DT>Named datatype</DT>
+ <DD>See committed datatype.</DD>
+</DL>
+
+<DL>
+ <DT>Null dataspace</DT>
+ <DD>A shape which represents the empty set. Array variables with this shape
+ cannot store any values.</DD>
+</DL>
+
+\section GLS_O O
+
+<DL>
+ <DT>Object</DT>
+ <DD>An HDF5 group, dataset or named datatype; an HDF5 item that can be linked
+ to zero or more groups and decorated with zero or more HDF5 attributes.</DD>
+</DL>
+
+<DL>
+ <DT>Object reference</DT>
+ <DD>
+ <OL>
+ <LI>A datatype for representing references to objects in a file.</LI>
+ <LI>A value of the object reference datatype.</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Opaque datatype</DT>
+ <DD>A family of HDF5 datatypes whose elements are byte sequences of a given
+ fixed length. An opaque datatype can be tagged with a sequence of up to 256
+ ASCII characters, e.g., MIME code.</DD>
+</DL>
+
+\section GLS_P P
+
+<DL>
+ <DT>Path name</DT>
+ <DD>A Unicode string that is the concatenation of link names separated by
+ slashes (<CODE>'/'</CODE>). In HDF5, path names are used to locate and refer
+ to HDF5 objects.</DD>
+</DL>
+
+<DL>
+ <DT>Plugin</DT>
+ <DD>An HDF5 library feature or capability that can be added dynamically at
+ application run time rather than library compilation time. Plugins are
+ usually implemented as shared libraries, and their discovery and loading
+ behavior can be controlled programmatically or through environment
+ variables.
+ </DD>
+</DL>
+
+<DL>
+ <DT>Point selection</DT>
+ <DD>A dataspace selection that consists of a set of points (coordinates) in
+ the same dataspace.</DD>
+</DL>
+
+<DL>
+ <DT>Property list</DT>
+ <DD>
+ <P>An HDF5 API construct, a means of customizing the behavior of the HDF5
+ library when creating, accessing or modifying HDF5 items.</P>
+ <P>While the default property settings are sufficient in many cases, certain
+ HDF5 features, such as compression, can be reasonably controlled only by the
+ user who has to provide the desired settings via property lists.</P>
+</DD>
+</DL>
+
+\section GLS_R R
+
+<DL>
+ <DT>Rank</DT>
+ <DD>The number of dimensions of a non-null dataspace.</DD>
+</DL>
+
+<DL>
+ <DT>Reference</DT>
+ <DD>
+ <OL>
+ <LI>An HDF5 object reference</LI>
+ <LI>An HDF5 dataset region reference</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Reference datatype</DT>
+ <DD>
+ <OL>
+ <LI>An HDF5 datatype whose elements represent references to HDF5
+ objects.</LI>
+ <LI>An HDF5 datatype whose elements represent references to regions of an
+ HDF5 dataset.</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Region reference</DT>
+ <DD>See dataset region reference.</DD>
+</DL>
+
+<DL>
+ <DT>Root group</DT>
+ <DD>
+ <P>An HDF5 group that is present in all HDF5 files and that acts as the
+ entry or base point for all other data stored in an HDF5 file.</P>
+ <P>The root group is "the mother of all objects" in an HDF5 file in the
+ sense that all objects (and their attributes) can be discovered,
+ beginning at the root group, by combinations of the following
+ operations:</P>
+ <UL>
+ <LI>Link traversal</LI>
+ <LI>De-referencing of object references</LI>
+ </UL>
+ <P>This discovery is portable and robust with respect to file-internal
+ storage reorganization.</P>
+</DD>
+</DL>
+
+\section GLS_S S
+
+<DL>
+ <DT>Scalar dataspace</DT>
+ <DD>A kind of HDF5 dataspace that has the shape of a singleton, i.e., a set
+ containing a single element. Array variables with this shape store exactly one
+ element.</DD>
+</DL>
+
+<DL>
+ <DT>Selection</DT>
+ <DD>
+ <OL>
+ <LI>A subset of points of an HDF5 dataspace. The subset might be a point
+ selection or a combination (union, intersection, etc.) of hyperslabs.</LI>
+ <LI>A subset of dataset elements associated with a dataspace selection as
+ described under 1.</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Serialization</DT>
+ <DD>
+ <OL>
+ <LI>The flattening of an N-dimensional array into a 1-dimensional
+ array.</LI>
+ <LI>The encoding of a complex data item as a linear byte stream.</LI>
+ </OL>
+ </DD>
+</DL>
+
+<DL>
+ <DT>Soft link</DT>
+ <DD>A kind of HDF5 link in which the link destination is specified as an HDF5
+ path name. The path name may or may not refer to an actual object.</DD>
+</DL>
+
+<DL>
+ <DT>Storage layout</DT>
+ <DD>The storage arrangement for dataset elements, links in a group's link
+ collection, or attributes in an object's attribute collection.</DD>
+</DL>
+
+<DL>
+ <DT>String datatype</DT>
+ <DD></DD>
+</DL>
+
+<DL>
+ <DT>Super block</DT>
+ <DD>An HDF5 file format primitive; a block of data which contains information
+ required to access HDF5 files in a portable manner on multiple platforms. The
+ super block contains information such as version numbers, the size of offsets
+ and lengths, and the location of the root group.</DD>
+</DL>
+
+<DL>
+ <DT>SWMR</DT>
+ <DD>Single Writer Multiple Reader, a file access mode in which a single
+ process is permitted to write data to an HDF5 file while other processes are
+ permitted to read data from the same file without the need of inter-process
+ communication or synchronization.</DD>
+</DL>
+
+<DL>
+ <DT>Symbolic link</DT>
+ <DD>An external link or a soft link.</DD>
+</DL>
+
+\section GLS_U U
+
+<DL>
+ <DT>User block</DT>
+ <DD>An HDF5 file format primitive that allows one to set aside a fixed-size
+ (at least 512 bytes or any power of 2 thereafter) contiguous range of bytes at
+ the beginning of an HDF5 file for application purposes which will be
+ skipped/ignored by the HDF5 library.</DD>
+</DL>
+
+<DL>
+ <DT>UTF-8</DT>
+ <DD>
+ <P>A variable-length (1-4 bytes per code point) encoding of the Unicode set
+ of code points. This is the encoding supported by HDF5 to represent Unicode
+ strings.</P>
+ <P>The ASCII encoding is a proper subset of UTF-8.</P>
+</DD>
+</DL>
+
+\section GLS_V V
+
+<DL>
+ <DT>Variable-length (sequence) datatype</DT>
+ <DD>A family of HDF5 datatypes whose elements are variable-length sequences of
+ a given datatype.</DD>
+</DL>
+
+<DL>
+ <DT>Virtual Dataset (VDS)</DT>
+ <DD>An HDF5 dataset with virtual storage layout. A dataset whose elements are
+ partially or entirely stored physically in other datasets.</DD>
+</DL>
+
+<DL>
+ <DT>Virtual File Driver (VFD)</DT>
+ <DD></DD>
+</DL>
+
+
+<DL>
+ <DT>Virtual layout</DT>
+ <DD></DD>
+</DL>
+
+
+<DL>
+ <DT>Virtual Object Layer (VOL)</DT>
+ <DD></DD>
+</DL>
+
+*/
diff --git a/doxygen/dox/Overview.dox b/doxygen/dox/Overview.dox
index e9c52c2..040769c 100644
--- a/doxygen/dox/Overview.dox
+++ b/doxygen/dox/Overview.dox
@@ -1,9 +1,8 @@
/** \mainpage notitle
-This is the documentation set for HDF5. You can
-<a href="hdf5-doc.tgz">download</a> it as a tgz archive for offline reading.
-This documentation includes specifications and documentation
+This is the documentation set for HDF5.
+It includes specifications and documentation
of software and tools developed and maintained by
<a href="https://www.hdfgroup.org/">The HDF Group</a>. It is impractical to document
the entire HDF5 ecosystem in one place, and you should also consult the documentation
@@ -38,8 +37,8 @@ documents cover a mix of tasks, concepts, and reference, to help a specific
You can <a href="hdf5-doc.tgz">download</a> it as a tgz archive for offline reading.
\par History
- A snapshot (April 2017) of the pre-Doxygen HDF5 documentation can be found
- <a href="https://hdfgroup.github.io/archive/hdf5doc/">here</a>.
+ A snapshot (~April 2017) of the pre-Doxygen HDF5 documentation can be found
+ <a href="https://docs.hdfgroup.org/archive/support/HDF5/doc/index.html">here</a>.
\par ToDo List
There is plenty of <a href="./todo.html">unfinished business</a>.
diff --git a/doxygen/examples/H5.format.1.0.html b/doxygen/examples/H5.format.1.0.html
index cdc19ec7..ff21315 100644
--- a/doxygen/examples/H5.format.1.0.html
+++ b/doxygen/examples/H5.format.1.0.html
@@ -10,12 +10,12 @@
<table border=0 width=90%>
<tr>
<td valign=top>
- <ol type=I>
+ <ol type="I">
<li><a href="#Intro">Introduction</a>
<li><a href="#BootBlock">Disk Format Level 0 - File Signature and Super Block</a>
<li><a href="#Group">Disk Format Level 1 - File Infrastructure</a>
<font size=-2>
- <ol type=A>
+ <ol type="A">
<li><a href="#Btrees">Disk Format Level 1A - B-link Trees and B-tree Nodes</a>
<li><a href="#SymbolTable">Disk Format Level 1B - Group</a>
<li><a href="#SymbolTableEntry">Disk Format Level 1C - Group Entry</a>
@@ -26,9 +26,9 @@
</font>
<li><a href="#DataObject">Disk Format Level 2 - Data Objects</a>
<font size=-2>
- <ol type=A>
+ <ol type="A">
<li><a href="#ObjectHeader">Disk Format Level 2a - Data Object Headers</a>
- <ol type=1>
+ <ol type="1">
<li><a href="#NILMessage">Name: NIL</a> <!-- 0x0000 -->
<li><a href="#SimpleDataSpace">Name: Simple Dataspace</a> <!-- 0x0001 -->
<!--
@@ -41,13 +41,13 @@
</font>
</ol>
</td><td>&nbsp;&nbsp;</td><td valign=top>
- <ol type=I>
+ <ol type="I" start="4">
<li><a href="#DataObject">Disk Format Level 2 - Data Objects</a>
<font size=-2><i>(Continued)</i>
- <ol type=A>
+ <ol type="A">
<li><a href="#ObjectHeader">Disk Format Level 2a - Data Object Headers</a><i>(Continued)</i>
- <ol type=1>
+ <ol type="1" start="6">
<li><a href="#CompactDataStorageMessage">Name: Data Storage - Compact</a> <!-- 0x0006 -->
<li><a href="#ExternalFileListMessage">Name: Data Storage - External Data Files</a> <!-- 0x0007 -->
<li><a href="#LayoutMessage">Name: Data Storage - Layout</a> <!-- 0x0008 -->
@@ -732,7 +732,7 @@ Elena> "Free-space object"
only does the level indicate whether child pointers
point to sub-trees or to data, but it can also be used
to help file consistency checking utilities reconstruct
- damanged trees.</td>
+ damaged trees.</td>
</tr>
<tr valign=top>
diff --git a/doxygen/examples/H5.format.1.1.html b/doxygen/examples/H5.format.1.1.html
index 9894fad..0ae31df 100644
--- a/doxygen/examples/H5.format.1.1.html
+++ b/doxygen/examples/H5.format.1.1.html
@@ -36,18 +36,18 @@ TABLE.list TD { border:none; }
<table border=0 width=90%>
<tr>
<td valign=top>
- <ol type=I>
+ <ol type="I">
<li><a href="#Intro">Introduction</a>
<li><a href="#FileMetaData">Disk Format Level 0 - File Metadata</a>
<font size=-2>
- <ol type=A>
+ <ol type="A">
<li><a href="#SuperBlock">Disk Format Level 0A - File Signature and Super Block</a>
<li><a href="#DriverInfo">Disk Format Level 0B - File Driver Info</a>
</ol>
</font>
<li><a href="#FileInfra">Disk Format Level 1 - File Infrastructure</a>
<font size=-2>
- <ol type=A>
+ <ol type="A">
<li><a href="#Btrees">Disk Format Level 1A - B-link Trees and B-tree Nodes</a>
<li><a href="#SymbolTable">Disk Format Level 1B - Group</a>
<li><a href="#SymbolTableEntry">Disk Format Level 1C - Group Entry</a>
@@ -58,9 +58,9 @@ TABLE.list TD { border:none; }
</font>
<li><a href="#DataObject">Disk Format Level 2 - Data Objects</a>
<font size=-2>
- <ol type=A>
+ <ol type="A">
<li><a href="#ObjectHeader">Disk Format Level 2a - Data Object Headers</a>
- <ol type=1>
+ <ol type="1">
<li><a href="#NILMessage">Name: NIL</a> <!-- 0x0000 -->
<li><a href="#SimpleDataSpace">Name: Simple Dataspace</a> <!-- 0x0001 -->
<!-- <li><a href="#DataSpaceMessage">Name: Complex Dataspace</a> --> <!-- 0x0002 -->
@@ -73,13 +73,13 @@ TABLE.list TD { border:none; }
</font>
</ol>
</td><td>&nbsp;&nbsp;</td><td valign=top>
- <ol type=I start=4>
+ <ol type="I" start="4">
<li><a href="#DataObject">Disk Format Level 2 - Data Objects</a>
<font size=-2><i>(Continued)</i>
<ol type=A>
<li><a href="#ObjectHeader">Disk Format Level 2a - Data Object Headers</a><i>(Continued)</i>
- <ol type=1 start=6>
+ <ol type="1" start="7">
<!-- <li><a href="#CompactDataStorageMessage">Name: Data Storage - Compact</a> --> <!-- 0x0006 -->
<li><a href="#ReservedMessage_0006">Name: Reserved - not assigned yet</a> <!-- 0x0006 -->
<li><a href="#ExternalFileListMessage">Name: Data Storage - External Data Files</a> <!-- 0x0007 -->
@@ -1059,7 +1059,7 @@ TABLE.list TD { border:none; }
only does the level indicate whether child pointers
point to sub-trees or to data, but it can also be used
to help file consistency checking utilities reconstruct
- damanged trees.
+ damaged trees.
</P>
</td>
</tr>
diff --git a/doxygen/hdf5doxy_layout.xml b/doxygen/hdf5doxy_layout.xml
index 6efa690..24642b5 100644
--- a/doxygen/hdf5doxy_layout.xml
+++ b/doxygen/hdf5doxy_layout.xml
@@ -7,7 +7,7 @@
<tab type="user" url="@ref Cookbook" title="Cookbook" />
<tab type="user" url="https://portal.hdfgroup.org/display/HDF5/HDF5+User+Guides" title="User Guides" />
<tab type="user" url="https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide" title="Application Developer's Guide" />
- <tab type="user" url="https://portal.hdfgroup.org/display/HDF5/HDF5+Glossary" title="Glossary" />
+ <tab type="user" url="@ref GLS" title="Glossary" />
<tab type="user" url="@ref RM" title="Reference Manual" />
<tab type="user" url="@ref TN" title="Technical Notes" />
<tab type="user" url="@ref SPEC" title="Specifications" />
diff --git a/doxygen/img/HDF5.png b/doxygen/img/HDF5.png
new file mode 100644
index 0000000..0458fa8
--- /dev/null
+++ b/doxygen/img/HDF5.png
Binary files differ
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 9ab870f..3f329c1 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -42,6 +42,14 @@ set (examples
h5_vds-percival-unlim-maxmin
)
+if (H5_HAVE_PARALLEL)
+ set (parallel_examples
+ ph5example
+ ph5_filtered_writes
+ ph5_filtered_writes_no_sel
+ )
+endif ()
+
foreach (example ${examples})
add_executable (${example} ${HDF5_EXAMPLES_SOURCE_DIR}/${example}.c)
target_include_directories (${example} PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
@@ -63,23 +71,25 @@ foreach (example ${examples})
endforeach ()
if (H5_HAVE_PARALLEL)
- add_executable (ph5example ${HDF5_EXAMPLES_SOURCE_DIR}/ph5example.c)
- target_include_directories (ph5example PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
- if (NOT BUILD_SHARED_LIBS)
- TARGET_C_PROPERTIES (ph5example STATIC)
- target_link_libraries (ph5example PRIVATE ${HDF5_LIB_TARGET} ${MPI_C_LIBRARIES})
- else ()
- TARGET_C_PROPERTIES (ph5example SHARED)
- target_link_libraries (ph5example PRIVATE ${HDF5_LIBSH_TARGET} ${MPI_C_LIBRARIES})
- endif ()
- set_target_properties (ph5example PROPERTIES FOLDER examples)
+ foreach (parallel_example ${parallel_examples})
+ add_executable (${parallel_example} ${HDF5_EXAMPLES_SOURCE_DIR}/${parallel_example}.c)
+ target_include_directories (${parallel_example} PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+ if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (${parallel_example} STATIC)
+ target_link_libraries (${parallel_example} PRIVATE ${HDF5_LIB_TARGET} ${MPI_C_LIBRARIES})
+ else ()
+ TARGET_C_PROPERTIES (${parallel_example} SHARED)
+ target_link_libraries (${parallel_example} PRIVATE ${HDF5_LIBSH_TARGET} ${MPI_C_LIBRARIES})
+ endif ()
+ set_target_properties (${parallel_example} PROPERTIES FOLDER examples)
- #-----------------------------------------------------------------------------
- # Add Target to clang-format
- #-----------------------------------------------------------------------------
- if (HDF5_ENABLE_FORMATTERS)
- clang_format (HDF5_EXAMPLES_ph5example_FORMAT ph5example)
- endif ()
+ #-----------------------------------------------------------------------------
+ # Add Target to clang-format
+ #-----------------------------------------------------------------------------
+ if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_EXAMPLES_${parallel_example}_FORMAT ${parallel_example})
+ endif ()
+ endforeach ()
endif ()
if (BUILD_TESTING AND HDF5_TEST_EXAMPLES)
diff --git a/examples/CMakeTests.cmake b/examples/CMakeTests.cmake
index 70142c8..3e24ba0 100644
--- a/examples/CMakeTests.cmake
+++ b/examples/CMakeTests.cmake
@@ -101,22 +101,26 @@ if (H5_HAVE_PARALLEL AND HDF5_TEST_PARALLEL AND NOT WIN32)
# Ensure that 24 is a multiple of the number of processes.
# The number 24 corresponds to SPACE1_DIM1 and SPACE1_DIM2 defined in ph5example.c
math(EXPR NUMPROCS "24 / ((24 + ${MPIEXEC_MAX_NUMPROCS} - 1) / ${MPIEXEC_MAX_NUMPROCS})")
- if (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (NAME MPI_TEST_EXAMPLES-ph5example COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:ph5example> ${MPIEXEC_POSTFLAGS})
- else ()
- add_test (NAME MPI_TEST_EXAMPLES-ph5example COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${NUMPROCS};${MPIEXEC_PREFLAGS};$<TARGET_FILE:ph5example>;${MPIEXEC_POSTFLAGS}"
- -D "TEST_ARGS:STRING="
- -D "TEST_EXPECT=0"
- -D "TEST_OUTPUT=ph5example.out"
- -D "TEST_REFERENCE:STRING=PHDF5 tests finished with no errors"
- -D "TEST_FILTER:STRING=PHDF5 tests finished with no errors"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
- -P "${HDF_RESOURCES_EXT_DIR}/grepTest.cmake"
- )
- endif ()
- if (last_test)
- set_tests_properties (MPI_TEST_EXAMPLES-ph5example PROPERTIES DEPENDS ${last_test})
- endif ()
- set (last_test "MPI_TEST_EXAMPLES-ph5example")
+
+ foreach (parallel_example ${parallel_examples})
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${parallel_example}> ${MPIEXEC_POSTFLAGS})
+ else ()
+ add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${NUMPROCS};${MPIEXEC_PREFLAGS};$<TARGET_FILE:${parallel_example}>;${MPIEXEC_POSTFLAGS}"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${parallel_example}.out"
+ -D "TEST_REFERENCE:STRING=PHDF5 example finished with no errors"
+ #-D "TEST_FILTER:STRING=PHDF5 tests finished with no errors"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/grepTest.cmake"
+ )
+ endif ()
+ if (last_test)
+ set_tests_properties (MPI_TEST_EXAMPLES-${parallel_example} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "MPI_TEST_EXAMPLES-${parallel_example}")
+ endforeach ()
endif ()
diff --git a/examples/Makefile.am b/examples/Makefile.am
index 7b5aa63..161f789 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -20,7 +20,7 @@
include $(top_srcdir)/config/commence.am
if BUILD_PARALLEL_CONDITIONAL
- EXAMPLE_PROG_PARA = ph5example
+ EXAMPLE_PROG_PARA = ph5example ph5_filtered_writes ph5_filtered_writes_no_sel
endif
INSTALL_SCRIPT_FILES = run-c-ex.sh
@@ -50,7 +50,7 @@ INSTALL_FILES = h5_write.c h5_read.c h5_extend_write.c h5_chunk_read.c h5_compou
h5_group.c h5_select.c h5_attribute.c h5_mount.c h5_drivers.c \
h5_reference_deprec.c h5_ref_extern.c h5_ref_compat.c h5_ref2reg_deprec.c \
h5_extlink.c h5_elink_unix2win.c h5_shared_mesg.c h5_debug_trace.c \
- ph5example.c \
+ ph5example.c ph5_filtered_writes.c ph5_filtered_writes_no_sel.c \
h5_vds.c h5_vds-exc.c h5_vds-exclim.c h5_vds-eiger.c h5_vds-simpleIO.c \
h5_vds-percival.c h5_vds-percival-unlim.c h5_vds-percival-unlim-maxmin.c
@@ -119,6 +119,8 @@ h5_reference_deprec: $(srcdir)/h5_reference_deprec.c
h5_ref2reg_deprec: $(srcdir)/h5_ref2reg_deprec.c
h5_drivers: $(srcdir)/h5_drivers.c
ph5example: $(srcdir)/ph5example.c
+ph5_filtered_writes: $(srcdir)/ph5_filtered_writes.c
+ph5_filtered_writes_no_sel: $(srcdir)/ph5_filtered_writes_no_sel.c
h5_dtransform: $(srcdir)/h5_dtransform.c
h5_extlink: $(srcdir)/h5_extlink.c $(EXTLINK_DIRS)
h5_elink_unix2win: $(srcdir)/h5_elink_unix2win.c $(EXTLINK_DIRS)
diff --git a/examples/h5_extlink.c b/examples/h5_extlink.c
index f9d4046..61fd589 100644
--- a/examples/h5_extlink.c
+++ b/examples/h5_extlink.c
@@ -414,14 +414,14 @@ UD_hard_create(const char *link_name, hid_t loc_group, const void *udata, size_t
token = *((H5O_token_t *)udata);
- //! [H5Open_by_token_snip]
+ //! [H5Oopen_by_token_snip]
/* Open the object this link points to so that we can increment
* its reference count. This also ensures that the token passed
* in points to a real object (although this check is not perfect!) */
target_obj = H5Oopen_by_token(loc_group, token);
- //! [H5Open_by_token_snip]
+ //! [H5Oopen_by_token_snip]
if (target_obj < 0) {
ret_value = -1;
diff --git a/examples/ph5_filtered_writes.c b/examples/ph5_filtered_writes.c
new file mode 100644
index 0000000..0f399dd
--- /dev/null
+++ b/examples/ph5_filtered_writes.c
@@ -0,0 +1,490 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to write to datasets
+ * with filters applied to them.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory (where compilation is done) is not suitable for
+ * parallel I/O, and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need a
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes.h5"
+#define EXAMPLE_DSET1_NAME "DSET1"
+#define EXAMPLE_DSET2_NAME "DSET2"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguously.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+ hbool_t do_cleanup = getenv(HDF5_NOCLEANUP) ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where no chunks in the dataset are written
+ * to by more than 1 MPI rank. This will
+ * generally give the best performance as the
+ * MPI ranks will need the least amount of
+ * inter-process communication.
+ */
+static void
+write_dataset_no_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ size_t i, j;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET1_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers a
+ * single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each MPI rank
+ * writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+/*
+ * Routine to write to a dataset in a fashion
+ * where every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+static void
+write_dataset_overlap(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ size_t i, j;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * mpi_size x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = mpi_size;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of N chunks,
+ * where N is the number of MPI ranks. The
+ * first dataset dimension scales according
+ * to the number of MPI ranks. The second
+ * dataset dimension stays fixed according
+ * to the chunk size.
+ */
+ dataset_dims[0] = mpi_size * chunk_dims[0];
+ dataset_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET2_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Each MPI rank's selection covers
+ * part of every chunk in the first
+ * dimension. Each MPI rank's selection
+ * covers all of every chunk in the
+ * second dimension. This leads to
+ * each MPI rank writing an equal
+ * amount of data to every chunk
+ * in the dataset.
+ */
+ start[0] = mpi_rank;
+ start[1] = 0;
+ stride[0] = chunk_dims[0];
+ stride[1] = 1;
+ count[0] = mpi_size;
+ count[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ data = malloc(mpi_size * EXAMPLE_DSET_CHUNK_DIM_SIZE * sizeof(C_DATATYPE));
+
+ fill_databuf(start, count, stride, data);
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ free(data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char * par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ *
+ * Refer to the 'ph5_filtered_writes_no_sel'
+ * example to see how to setup a dataset
+ * write when one or more MPI ranks have
+ * no data to contribute to the write
+ * operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to each dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank. This will generally
+ * give the best performance as the MPI ranks
+ * will need the least amount of inter-process
+ * communication.
+ */
+ write_dataset_no_overlap(file_id, dxpl_id);
+
+ /*
+ * Write to a dataset in a fashion where
+ * every chunk in the dataset is written
+ * to by every MPI rank. This will generally
+ * give the worst performance as the MPI ranks
+ * will need the most amount of inter-process
+ * communication.
+ */
+ write_dataset_overlap(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
diff --git a/examples/ph5_filtered_writes_no_sel.c b/examples/ph5_filtered_writes_no_sel.c
new file mode 100644
index 0000000..2e29838
--- /dev/null
+++ b/examples/ph5_filtered_writes_no_sel.c
@@ -0,0 +1,370 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Example of using the parallel HDF5 library to collectively write to
+ * datasets with filters applied to them when one or more MPI ranks do not
+ * have data to contribute to the dataset.
+ *
+ * If the HDF5_NOCLEANUP environment variable is set, the file that
+ * this example creates will not be removed as the example finishes.
+ *
+ * A parallel file prefix is needed because, in general, the current
+ * working directory (where compilation is done) is not suitable for
+ * parallel I/O, and there is no standard pathname for parallel file
+ * systems. In some cases, the parallel file name may even need a
+ * parallel file type prefix such as: "pfs:/GF/...". Therefore, this
+ * example parses the HDF5_PARAPREFIX environment variable for a prefix,
+ * if one is needed.
+ */
+
+#include <stdlib.h>
+
+#include "hdf5.h"
+
+#if defined(H5_HAVE_PARALLEL) && defined(H5_HAVE_PARALLEL_FILTERED_WRITES)
+
+#define EXAMPLE_FILE "ph5_filtered_writes_no_sel.h5"
+#define EXAMPLE_DSET_NAME "DSET"
+
+#define EXAMPLE_DSET_DIMS 2
+#define EXAMPLE_DSET_CHUNK_DIM_SIZE 10
+
+/* Dataset datatype */
+#define HDF5_DATATYPE H5T_NATIVE_INT
+typedef int C_DATATYPE;
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif
+
+/* Global variables */
+int mpi_rank, mpi_size;
+
+/*
+ * Routine to set an HDF5 filter on the given DCPL
+ */
+static void
+set_filter(hid_t dcpl_id)
+{
+ htri_t filter_avail;
+
+ /*
+ * Check if 'deflate' filter is available
+ */
+ filter_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
+ if (filter_avail < 0)
+ return;
+ else if (filter_avail) {
+ /*
+ * Set 'deflate' filter with reasonable
+ * compression level on DCPL
+ */
+ H5Pset_deflate(dcpl_id, 6);
+ }
+ else {
+ /*
+ * Set Fletcher32 checksum filter on DCPL
+ * since it is always available in HDF5
+ */
+ H5Pset_fletcher32(dcpl_id);
+ }
+}
+
+/*
+ * Routine to fill a data buffer with data. Assumes
+ * dimension rank is 2 and data is stored contiguously.
+ */
+void
+fill_databuf(hsize_t start[], hsize_t count[], hsize_t stride[], C_DATATYPE *data)
+{
+ C_DATATYPE *dataptr = data;
+ hsize_t i, j;
+
+ /* Use MPI rank value for data */
+ for (i = 0; i < count[0]; i++) {
+ for (j = 0; j < count[1]; j++) {
+ *dataptr++ = mpi_rank;
+ }
+ }
+}
+
+/* Cleanup created file */
+static void
+cleanup(char *filename)
+{
+ hbool_t do_cleanup = getenv(HDF5_NOCLEANUP) ? 0 : 1;
+
+ if (do_cleanup)
+ MPI_File_delete(filename, MPI_INFO_NULL);
+}
+
+/*
+ * Routine to write collectively to a dataset
+ * when some MPI ranks have no data to
+ * contribute. Ranks without data must still
+ * participate in the collective write, and
+ * must call H5Sselect_none on the file
+ * dataspace that is passed to H5Dwrite.
+ */
+static void
+write_dataset_some_no_sel(hid_t file_id, hid_t dxpl_id)
+{
+ C_DATATYPE data[EXAMPLE_DSET_CHUNK_DIM_SIZE][4 * EXAMPLE_DSET_CHUNK_DIM_SIZE];
+ hsize_t dataset_dims[EXAMPLE_DSET_DIMS];
+ hsize_t chunk_dims[EXAMPLE_DSET_DIMS];
+ hsize_t start[EXAMPLE_DSET_DIMS];
+ hsize_t stride[EXAMPLE_DSET_DIMS];
+ hsize_t count[EXAMPLE_DSET_DIMS];
+ hbool_t no_selection;
+ size_t i, j;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t file_dataspace = H5I_INVALID_HID;
+
+ /*
+ * ------------------------------------
+ * Setup Dataset Creation Property List
+ * ------------------------------------
+ */
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ /*
+ * REQUIRED: Dataset chunking must be enabled to
+ * apply a data filter to the dataset.
+ * Chunks in the dataset are of size
+ * EXAMPLE_DSET_CHUNK_DIM_SIZE x EXAMPLE_DSET_CHUNK_DIM_SIZE.
+ */
+ chunk_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ chunk_dims[1] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ H5Pset_chunk(dcpl_id, EXAMPLE_DSET_DIMS, chunk_dims);
+
+ /* Set filter to be applied to created datasets */
+ set_filter(dcpl_id);
+
+ /*
+ * ------------------------------------
+ * Define the dimensions of the dataset
+ * and create it
+ * ------------------------------------
+ */
+
+ /*
+ * Create a dataset composed of 4 chunks
+ * per MPI rank. The first dataset dimension
+ * scales according to the number of MPI ranks.
+ * The second dataset dimension stays fixed
+ * according to the chunk size.
+ */
+ dataset_dims[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE * mpi_size;
+ dataset_dims[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ file_dataspace = H5Screate_simple(EXAMPLE_DSET_DIMS, dataset_dims, NULL);
+
+ /* Create the dataset */
+ dset_id = H5Dcreate2(file_id, EXAMPLE_DSET_NAME, HDF5_DATATYPE, file_dataspace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT);
+
+ /*
+ * ------------------------------------
+ * Setup selection in the dataset for
+ * each MPI rank
+ * ------------------------------------
+ */
+
+ /*
+ * Odd rank value MPI ranks do not
+ * contribute any data to the dataset.
+ */
+ no_selection = (mpi_rank % 2) == 1;
+
+ if (no_selection) {
+ /*
+ * MPI ranks not contributing data to
+ * the dataset should call H5Sselect_none
+ * on the file dataspace that will be
+ * passed to H5Dwrite.
+ */
+ H5Sselect_none(file_dataspace);
+ }
+ else {
+ /*
+ * Even MPI ranks contribute data to
+ * the dataset. Each MPI rank's selection
+ * covers a single chunk in the first dataset
+ * dimension. Each MPI rank's selection
+ * covers 4 chunks in the second dataset
+ * dimension. This leads to each contributing
+ * MPI rank writing to 4 chunks of the dataset.
+ */
+ start[0] = mpi_rank * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = EXAMPLE_DSET_CHUNK_DIM_SIZE;
+ count[1] = 4 * EXAMPLE_DSET_CHUNK_DIM_SIZE;
+
+ H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, NULL);
+
+ /*
+ * --------------------------------------
+ * Fill data buffer with MPI rank's rank
+ * value to make it easy to see which
+ * part of the dataset each rank wrote to
+ * --------------------------------------
+ */
+
+ fill_databuf(start, count, stride, &data[0][0]);
+ }
+
+ /*
+ * ---------------------------------
+ * Write to the dataset collectively
+ * ---------------------------------
+ */
+
+ H5Dwrite(dset_id, HDF5_DATATYPE, no_selection ? H5S_ALL : H5S_BLOCK, file_dataspace, dxpl_id, data);
+
+ /*
+ * --------------
+ * Close HDF5 IDs
+ * --------------
+ */
+
+ H5Sclose(file_dataspace);
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+}
+
+int
+main(int argc, char **argv)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ char * par_prefix = NULL;
+ char filename[PATH_MAX];
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /*
+ * ----------------------------------
+ * Start parallel access to HDF5 file
+ * ----------------------------------
+ */
+
+ /* Setup File Access Property List with parallel I/O access */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl_id, comm, info);
+
+ /*
+ * OPTIONAL: Set collective metadata reads on FAPL to allow
+ * parallel writes to filtered datasets to perform
+ * better at scale. While not strictly necessary,
+ * this is generally recommended.
+ */
+ H5Pset_all_coll_metadata_ops(fapl_id, true);
+
+ /*
+ * OPTIONAL: Set the latest file format version for HDF5 in
+ * order to gain access to different dataset chunk
+ * index types and better data encoding methods.
+ * While not strictly necessary, this is generally
+ * recommended.
+ */
+ H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+ /* Parse any parallel prefix and create filename */
+ par_prefix = getenv("HDF5_PARAPREFIX");
+
+ snprintf(filename, PATH_MAX, "%s%s%s", par_prefix ? par_prefix : "", par_prefix ? "/" : "", EXAMPLE_FILE);
+
+ /* Create HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ /*
+ * --------------------------------------
+ * Setup Dataset Transfer Property List
+ * with collective I/O
+ * --------------------------------------
+ */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ /*
+ * REQUIRED: Setup collective I/O for the dataset
+ * write operations. Parallel writes to
+ * filtered datasets MUST be collective,
+ * even if some ranks have no data to
+ * contribute to the write operation.
+ */
+ H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+
+ /*
+ * --------------------------------
+ * Create and write to the dataset
+ * --------------------------------
+ */
+
+ /*
+ * Write to a dataset in a fashion where no
+ * chunks in the dataset are written to by
+ * more than 1 MPI rank and some MPI ranks
+ * have nothing to contribute to the dataset.
+ * In this case, the MPI ranks that have no
+ * data to contribute must still participate
+ * in the collective H5Dwrite call, but should
+ * call H5Sselect_none on the file dataspace
+ * passed to the H5Dwrite call.
+ */
+ write_dataset_some_no_sel(file_id, dxpl_id);
+
+ /*
+ * ------------------
+ * Close all HDF5 IDs
+ * ------------------
+ */
+
+ H5Pclose(dxpl_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+
+ printf("PHDF5 example finished with no errors\n");
+
+ /*
+ * ------------------------------------
+ * Cleanup created HDF5 file and finish
+ * ------------------------------------
+ */
+
+ cleanup(filename);
+
+ MPI_Finalize();
+
+ return 0;
+}
+
+#else
+
+int
+main(void)
+{
+ printf("HDF5 not configured with parallel support or parallel filtered writes are disabled!\n");
+ return 0;
+}
+
+#endif
diff --git a/examples/ph5example.c b/examples/ph5example.c
index 87632e5..f16b055 100644
--- a/examples/ph5example.c
+++ b/examples/ph5example.c
@@ -888,6 +888,7 @@ test_split_comm_access(char filenames[][PATH_MAX])
mrc = MPI_File_delete(filenames[color], info);
assert(mrc == MPI_SUCCESS);
}
+ MPI_Comm_free(&comm);
}
/*
@@ -934,7 +935,7 @@ mkfilenames(char *prefix)
"Need to adjust the code to accommodate the large size.\n");
}
for (i = 0; i < n; i++) {
- sprintf(testfiles[i], "%s/ParaEg%d.h5", prefix, i);
+ snprintf(testfiles[i], PATH_MAX, "%s/ParaEg%d.h5", prefix, i);
}
return (0);
}
@@ -1073,11 +1074,11 @@ main(int argc, char **argv)
finish:
if (mpi_rank == 0) { /* only process 0 reports */
if (nerrors)
- printf("***PHDF5 tests detected %d errors***\n", nerrors);
+ printf("***PHDF5 example detected %d errors***\n", nerrors);
else {
- printf("===================================\n");
- printf("PHDF5 tests finished with no errors\n");
- printf("===================================\n");
+ printf("=====================================\n");
+ printf("PHDF5 example finished with no errors\n");
+ printf("=====================================\n");
}
}
if (docleanup)
diff --git a/fortran/src/H5Df.c b/fortran/src/H5Df.c
index 5c46cb3..a780683 100644
--- a/fortran/src/H5Df.c
+++ b/fortran/src/H5Df.c
@@ -495,10 +495,10 @@ h5dget_storage_size_c(hid_t_f *dset_id, hsize_t_f *size)
c_dset_id = (hid_t)*dset_id;
c_size = H5Dget_storage_size(c_dset_id);
- if (c_size == 0)
- return ret_value;
- *size = (hsize_t_f)c_size;
- ret_value = 0;
+ if (c_size != 0) {
+ ret_value = 0;
+ }
+ *size = (hsize_t_f)c_size;
return ret_value;
}
diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90
index 655cf1c..6b77a8c 100644
--- a/fortran/src/H5Dff.F90
+++ b/fortran/src/H5Dff.F90
@@ -1625,8 +1625,7 @@ CONTAINS
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
-! buf - data buffer iin memory ro apply selection to
-! - of k-th dimension of the buf array
+! buf - memory buffer containing the selection to be filled
! Outputs:
! hdferr: - error code
! Success: 0
@@ -1676,8 +1675,7 @@ CONTAINS
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
-! buf - data buffer iin memory ro apply selection to
-! - of k-th dimension of the buf array
+! buf - memory buffer containing the selection to be filled
! Outputs:
! hdferr: - error code
! Success: 0
@@ -1724,8 +1722,7 @@ CONTAINS
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
- ! buf - data buffer iin memory ro apply selection to
- ! - of k-th dimension of the buf array
+ ! buf - memory buffer containing the selection to be filled
! Outputs:
! hdferr: - error code
! Success: 0
@@ -1799,8 +1796,7 @@ CONTAINS
! Inputs:
! fill_value - fill value
! space_id - memory space selection identifier
-! buf - data buffer iin memory ro apply selection to
-! - of k-th dimension of the buf array
+! buf - memory buffer containing the selection to be filled
! Outputs:
! hdferr: - error code
! Success: 0
diff --git a/fortran/src/H5Ff.c b/fortran/src/H5Ff.c
index f943200..339f8b7 100644
--- a/fortran/src/H5Ff.c
+++ b/fortran/src/H5Ff.c
@@ -583,7 +583,7 @@ h5fget_name_c(hid_t_f *obj_id, size_t_f *size, _fcd buf, size_t_f *buflen)
int_f ret_value = 0; /* Return value */
/*
- * Allocate buffer to hold name of an attribute
+ * Allocate buffer to hold name of file
*/
if (NULL == (c_buf = (char *)HDmalloc((size_t)*buflen + 1)))
HGOTO_DONE(FAIL);
@@ -591,7 +591,7 @@ h5fget_name_c(hid_t_f *obj_id, size_t_f *size, _fcd buf, size_t_f *buflen)
/*
* Call H5Fget_name function
*/
- if ((size_c = H5Fget_name((hid_t)*obj_id, c_buf, (size_t)*buflen)) < 0)
+ if ((size_c = H5Fget_name((hid_t)*obj_id, c_buf, (size_t)*buflen + 1)) < 0)
HGOTO_DONE(FAIL);
/*
diff --git a/fortran/test/tH5A_1_8.F90 b/fortran/test/tH5A_1_8.F90
index b245b1c..cd8a981 100644
--- a/fortran/test/tH5A_1_8.F90
+++ b/fortran/test/tH5A_1_8.F90
@@ -776,7 +776,7 @@ SUBROUTINE test_attr_info_by_idx(new_format, fcpl, fapl, total_error)
INTEGER :: Input1
INTEGER(HSIZE_T) :: hzero = 0_HSIZE_T
- INTEGER :: minusone = -1
+ INTEGER, PARAMETER :: minusone = -1
data_dims = 0
@@ -1422,7 +1422,7 @@ SUBROUTINE test_attr_delete_by_idx(new_format, fcpl, fapl, total_error)
INTEGER :: u ! Local index variable
INTEGER :: Input1
INTEGER(HSIZE_T) :: hzero = 0_HSIZE_T
- INTEGER :: minusone = -1
+ INTEGER, PARAMETER :: minusone = -1
data_dims = 0
@@ -2263,7 +2263,7 @@ SUBROUTINE test_attr_corder_create_basic( fcpl, fapl, total_error )
INTEGER :: error
INTEGER :: crt_order_flags
- INTEGER :: minusone = -1
+ INTEGER, PARAMETER :: minusone = -1
! Output message about test being performed
! WRITE(*,*) " - Testing Basic Code for Attributes with Creation Order Info"
diff --git a/fortran/test/tH5F.F90 b/fortran/test/tH5F.F90
index 3affed0..8d4845d 100644
--- a/fortran/test/tH5F.F90
+++ b/fortran/test/tH5F.F90
@@ -584,17 +584,23 @@ CONTAINS
! The following subroutine checks that h5fget_name_f produces
! correct output for a given obj_id and filename.
!
- SUBROUTINE check_get_name(obj_id, fix_filename, total_error)
+ SUBROUTINE check_get_name(obj_id, fix_filename, len_filename, total_error)
USE HDF5 ! This module contains all necessary modules
USE TH5_MISC
IMPLICIT NONE
INTEGER(HID_T) :: obj_id ! Object identifier
CHARACTER(LEN=80), INTENT(IN) :: fix_filename ! Expected filename
+ INTEGER, INTENT(IN) :: len_filename ! The length of the filename
INTEGER, INTENT(INOUT) :: total_error ! Error count
CHARACTER(LEN=80):: file_name ! Filename buffer
INTEGER:: error ! HDF5 error code
INTEGER(SIZE_T):: name_size ! Filename length
+
+ INTEGER, PARAMETER :: sm_len = 2
+ CHARACTER(LEN=len_filename) :: filename_exact
+ CHARACTER(LEN=len_filename-sm_len) :: filename_sm
+
!
!Get file name from the dataset identifier
!
@@ -637,6 +643,30 @@ CONTAINS
total_error = total_error + 1
END IF
+ ! Use a buffer which is the exact size needed to hold the filename
+ CALL h5fget_name_f(obj_id, filename_exact, name_size, error)
+ CALL check("h5fget_name_f",error,total_error)
+ IF(name_size .NE. len_filename)THEN
+ WRITE(*,*) " file name size obtained from the object id is incorrect"
+ total_error = total_error + 1
+ ENDIF
+ IF(filename_exact .NE. TRIM(fix_filename)) THEN
+ WRITE(*,*) " file name obtained from the object id is incorrect"
+ total_error = total_error + 1
+ END IF
+
+ ! Use a buffer which is smaller than needed to hold the filename
+ CALL h5fget_name_f(obj_id, filename_sm, name_size, error)
+ CALL check("h5fget_name_f",error,total_error)
+ IF(name_size .NE. len_filename)THEN
+ WRITE(*,*) " file name size obtained from the object id is incorrect"
+ total_error = total_error + 1
+ ENDIF
+ IF(filename_sm(1:len_filename-sm_len) .NE. fix_filename(1:len_filename-sm_len)) THEN
+ WRITE(*,*) " file name obtained from the object id is incorrect"
+ total_error = total_error + 1
+ END IF
+
END SUBROUTINE check_get_name
! The following subroutine tests h5fget_name_f.
@@ -653,6 +683,7 @@ CONTAINS
CHARACTER(LEN=*), PARAMETER :: filename = "filename"
CHARACTER(LEN=80) :: fix_filename
+ INTEGER :: len_filename
INTEGER(HID_T) :: file_id ! File identifier
INTEGER(HID_T) :: g_id ! Group identifier
@@ -679,8 +710,9 @@ CONTAINS
CALL h5gopen_f(file_id,"/",g_id, error)
CALL check("h5gopen_f",error,total_error)
- CALL check_get_name(file_id, fix_filename, total_error)
- CALL check_get_name(g_id, fix_filename, total_error)
+ len_filename = LEN_TRIM(fix_filename)
+ CALL check_get_name(file_id, fix_filename, len_filename, total_error)
+ CALL check_get_name(g_id, fix_filename, len_filename, total_error)
! Close the group.
!
diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90
index 75e4e72..d664dd7 100644
--- a/fortran/test/tH5P.F90
+++ b/fortran/test/tH5P.F90
@@ -541,7 +541,7 @@ SUBROUTINE test_chunk_cache(cleanup, total_error)
CALL H5Dclose_f(dsid, error)
CALL H5Oopen_f(fid, "dset", dsid, error, dapl1)
- ! Retrieve dapl from dataset, verfiy cache values are the same as on dapl1
+ ! Retrieve dapl from dataset, verify cache values are the same as on dapl1
!
! Note we rely on the knowledge that H5Pget_chunk_cache retrieves these
! values directly from the dataset structure, and not from a copy of the
@@ -563,7 +563,7 @@ SUBROUTINE test_chunk_cache(cleanup, total_error)
CALL H5Oopen_f(fid, "dset", dsid, error)
CALL check("H5Oopen_f", error, total_error)
- ! Retrieve dapl from dataset, verfiy cache values are the same as on fapl_local
+ ! Retrieve dapl from dataset, verify cache values are the same as on fapl_local
CALL H5Dget_access_plist_f(dsid, dapl2, error)
CALL check("H5Dget_access_plist_f", error, total_error)
diff --git a/hl/c++/test/ptableTest.cpp b/hl/c++/test/ptableTest.cpp
index 9db56e0..33199f1 100644
--- a/hl/c++/test/ptableTest.cpp
+++ b/hl/c++/test/ptableTest.cpp
@@ -622,7 +622,7 @@ TestHDFFV_9758()
s1[i].a = static_cast<int>(i);
s1[i].b = 1.0F * static_cast<float>(i * i);
s1[i].c = 1.0 / static_cast<double>(i + 1);
- HDsprintf(s1[i].d, "string%" PRIuHSIZE "", i);
+ HDsnprintf(s1[i].d, STRING_LENGTH, "string%" PRIuHSIZE "", i);
s1[i].e = static_cast<int>(100 + i);
}
diff --git a/hl/test/test_file_image.c b/hl/test/test_file_image.c
index 6bb36e4..4448e41 100644
--- a/hl/test/test_file_image.c
+++ b/hl/test/test_file_image.c
@@ -109,12 +109,13 @@ test_file_image(size_t open_images, size_t nflags, const unsigned *flags)
input_flags[i] = flags[(nflags + i) % nflags];
/* allocate name buffer for image i */
- filename[i] = (char *)HDmalloc(sizeof(char) * 32);
+ size_t filenamelength = sizeof(char) * 32;
+ filename[i] = (char *)HDmalloc(filenamelength);
if (!filename[i])
FAIL_PUTS_ERROR("HDmalloc() failed");
/* create file name */
- HDsprintf(filename[i], "image_file%d.h5", (int)i);
+ HDsnprintf(filename[i], filenamelength, "image_file%d.h5", (int)i);
/* create file */
if ((file_id[i] = H5Fcreate(filename[i], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
diff --git a/hl/test/test_image.c b/hl/test/test_image.c
index 2251a04..4997df6 100644
--- a/hl/test/test_image.c
+++ b/hl/test/test_image.c
@@ -650,7 +650,7 @@ test_generate(void)
HL_TESTING2("make indexed image from land data");
for (i = 0; i < n_elements; i++) {
- if (data[i] < 0.0f)
+ if (data[i] < 0.0F)
image_data[i] = 0;
else
image_data[i] = (unsigned char)((255 * data[i]) / xmax);
@@ -671,10 +671,10 @@ test_generate(void)
HL_TESTING2("make indexed image from sea data");
for (i = 0; i < n_elements; i++) {
- if (data[i] > 0.0f)
+ if (data[i] > 0.0F)
image_data[i] = 0;
else {
- image_data[i] = (unsigned char)((255.0f * (data[i] - xmin)) / (xmax - xmin));
+ image_data[i] = (unsigned char)((255.0F * (data[i] - xmin)) / (xmax - xmin));
}
}
diff --git a/hl/test/test_packet.c b/hl/test/test_packet.c
index 5f30d4b..e8b90f7 100644
--- a/hl/test/test_packet.c
+++ b/hl/test/test_packet.c
@@ -47,7 +47,7 @@ typedef struct particle_t {
*/
static particle_t testPart[NRECORDS] = {{"zero", 0, 0, 0.0F, 0.0}, {"one", 10, 10, 1.0F, 10.0},
{"two", 20, 20, 2.0F, 20.0}, {"three", 30, 30, 3.0F, 30.0},
- {"Four", 40, 40, 4.0F, 40.0}, {"Five", 50, 50, 5.0F, 50.0},
+ {"four", 40, 40, 4.0F, 40.0}, {"five", 50, 50, 5.0F, 50.0},
{"six", 60, 60, 6.0F, 60.0}, {"seven", 70, 70, 7.0F, 70.0}};
/*-------------------------------------------------------------------------
diff --git a/hl/test/test_packet_vlen.c b/hl/test/test_packet_vlen.c
index e704944..aa88a6c 100644
--- a/hl/test/test_packet_vlen.c
+++ b/hl/test/test_packet_vlen.c
@@ -109,7 +109,7 @@ test_VLof_atomic(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -246,7 +246,7 @@ test_VLof_comptype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -418,7 +418,7 @@ test_compound_VL_VLtype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -582,7 +582,7 @@ test_VLof_VLtype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1144,7 +1144,7 @@ testfl_VLof_atomic(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1281,7 +1281,7 @@ testfl_VLof_comptype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1453,7 +1453,7 @@ testfl_compound_VL_VLtype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
@@ -1617,7 +1617,7 @@ testfl_VLof_VLtype(void)
if (ret < 0)
goto error;
- HDsprintf(msg, "The number of packets in the packet table must be %u\n", NRECORDS);
+ HDsnprintf(msg, sizeof(msg), "The number of packets in the packet table must be %u\n", NRECORDS);
VERIFY(count == NRECORDS, msg);
/* Read all five packets back */
diff --git a/hl/tools/gif2h5/gif2mem.c b/hl/tools/gif2h5/gif2mem.c
index 2c2225e..44e18e0 100644
--- a/hl/tools/gif2h5/gif2mem.c
+++ b/hl/tools/gif2h5/gif2mem.c
@@ -49,22 +49,22 @@ Gif2Mem(GIFBYTE *MemGif, GIFTOMEM *GifMemoryStruct)
GIFCOMMENT ** gifComment; /* Comment Extension structure */
GIFGRAPHICCONTROL **gifGraphicControl; /* Graphic Control Extension strct */
- register GIFWORD i; /* Loop counter */
- GIFBYTE Identifier; /* Extension block identifier holder */
- GIFBYTE Label; /* Extension block label holder */
- GIFBYTE ImageCount; /* Count of the number of images in the file */
- GIFBYTE ImageArray; /* Keep the size of the array to store Images */
- GIFBYTE CommentCount;
- GIFBYTE CommentArray;
- GIFBYTE ApplicationCount;
- GIFBYTE ApplicationArray;
- GIFBYTE PlainTextCount;
- GIFBYTE PlainTextArray;
- GIFBYTE GCEflag;
- GIFBYTE aTemp;
- GIFBYTE j;
- GIFBYTE w; /* Two more variables needed only while testing */
- GIFBYTE * b; /* Endian Ordering */
+ GIFWORD i; /* Loop counter */
+ GIFBYTE Identifier; /* Extension block identifier holder */
+ GIFBYTE Label; /* Extension block label holder */
+ GIFBYTE ImageCount; /* Count of the number of images in the file */
+ GIFBYTE ImageArray; /* Keep the size of the array to store Images */
+ GIFBYTE CommentCount;
+ GIFBYTE CommentArray;
+ GIFBYTE ApplicationCount;
+ GIFBYTE ApplicationArray;
+ GIFBYTE PlainTextCount;
+ GIFBYTE PlainTextArray;
+ GIFBYTE GCEflag;
+ GIFBYTE aTemp;
+ GIFBYTE j;
+ GIFBYTE w; /* Two more variables needed only while testing */
+ GIFBYTE *b; /* Endian Ordering */
/* Allocate memory for the GIF structures */
/* Plug the structs into GifMemoryStruct at the end */
diff --git a/hl/tools/gif2h5/hdfgifwr.c b/hl/tools/gif2h5/hdfgifwr.c
index 21b14d1..7126da9 100644
--- a/hl/tools/gif2h5/hdfgifwr.c
+++ b/hl/tools/gif2h5/hdfgifwr.c
@@ -188,13 +188,13 @@ hdfWriteGIF(FILE *fp, byte *pic, int ptype, int w, int h, const byte *rmap, cons
static void
compress(int init_bits, FILE *outfile, byte *data, int len)
{
- register long fcode;
- register int i = 0;
- register int c;
- register int ent;
- register int disp;
- register int hsize_reg;
- register int hshift;
+ long fcode;
+ int i = 0;
+ int c;
+ int ent;
+ int disp;
+ int hsize_reg;
+ int hshift;
/*
* Set up the globals: g_init_bits - initial number of bits g_outfile -
diff --git a/hl/tools/gif2h5/writehdf.c b/hl/tools/gif2h5/writehdf.c
index c321519..b49e2a6 100644
--- a/hl/tools/gif2h5/writehdf.c
+++ b/hl/tools/gif2h5/writehdf.c
@@ -100,7 +100,7 @@ WriteHDF(GIFTOMEM GifMemoryStruct, char *HDFName)
dims[1] = gifImageDesc->ImageWidth;
/* create the image name */
- sprintf(ImageName, "Image%d", i);
+ snprintf(ImageName, sizeof(ImageName), "Image%d", i);
/* write image */
if (H5IMmake_image_8bit(file_id, ImageName, dims[1], dims[0], (gifImageDesc->Image)) < 0)
diff --git a/hl/tools/h5watch/extend_dset.c b/hl/tools/h5watch/extend_dset.c
index 624eebc..7756506 100644
--- a/hl/tools/h5watch/extend_dset.c
+++ b/hl/tools/h5watch/extend_dset.c
@@ -392,7 +392,7 @@ error:
***********************************************************************
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
char *dname = NULL;
char *fname = NULL;
diff --git a/hl/tools/h5watch/h5watch.c b/hl/tools/h5watch/h5watch.c
index c897c91..5a6fe6f 100644
--- a/hl/tools/h5watch/h5watch.c
+++ b/hl/tools/h5watch/h5watch.c
@@ -51,7 +51,7 @@ static herr_t process_cmpd_fields(hid_t fid, char *dsetname);
static herr_t check_dataset(hid_t fid, char *dsetname);
static void leave(int ret);
static void usage(const char *prog);
-static void parse_command_line(int argc, const char *argv[]);
+static void parse_command_line(int argc, const char *const *argv);
/*
* Command-line options: The user can only specify long-named parameters.
@@ -174,9 +174,9 @@ doprint(hid_t did, const hsize_t *start, const hsize_t *block, int rank)
} /* end else */
/* Floating point types should display full precision */
- sprintf(fmt_float, "%%1.%dg", FLT_DIG);
+ snprintf(fmt_float, sizeof(fmt_float), "%%1.%dg", FLT_DIG);
info.fmt_float = fmt_float;
- sprintf(fmt_double, "%%1.%dg", DBL_DIG);
+ snprintf(fmt_double, sizeof(fmt_double), "%%1.%dg", DBL_DIG);
info.fmt_double = fmt_double;
info.dset_format = "DSET-%s ";
@@ -665,7 +665,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
static void
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt; /* Command line option */
int tmp;
@@ -790,7 +790,7 @@ catch_signal(int H5_ATTR_UNUSED signo)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
char drivername[50]; /* VFD name */
char *fname = NULL; /* File name */
@@ -819,7 +819,7 @@ main(int argc, const char *argv[])
}
/* parse command line options */
- parse_command_line(argc, argv);
+ parse_command_line(argc, (const char *const *)argv);
if (argc <= H5_optind) {
error_msg("missing dataset name\n");
diff --git a/java/examples/datasets/H5Ex_D_Sofloat.java b/java/examples/datasets/H5Ex_D_Sofloat.java
index a42aba4..8edde09 100644
--- a/java/examples/datasets/H5Ex_D_Sofloat.java
+++ b/java/examples/datasets/H5Ex_D_Sofloat.java
@@ -21,6 +21,9 @@
************************************************************/
package examples.datasets;
+import java.text.DecimalFormat;
+import java.text.DecimalFormatSymbols;
+import java.util.Locale;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
@@ -123,8 +126,9 @@ public class H5Ex_D_Sofloat {
}
// Print the maximum value.
- System.out.println("Maximum value in write buffer is: " + max);
- System.out.println("Minimum value in write buffer is: " + min);
+ DecimalFormat df = new DecimalFormat("#,##0.000000", new DecimalFormatSymbols(Locale.US));
+ System.out.println("Maximum value in write buffer is: " + df.format(max));
+ System.out.println("Minimum value in write buffer is: " + df.format(min));
// Create a new file using the default properties.
try {
@@ -308,8 +312,9 @@ public class H5Ex_D_Sofloat {
}
// Print the maximum value.
- System.out.println("Maximum value in " + DATASETNAME + " is: " + max);
- System.out.println("Minimum value in " + DATASETNAME + " is: " + min);
+ DecimalFormat df = new DecimalFormat("#,##0.000000", new DecimalFormatSymbols(Locale.US));
+ System.out.println("Maximum value in " + DATASETNAME + " is: " + df.format(max));
+ System.out.println("Minimum value in " + DATASETNAME + " is: " + df.format(min));
// End access to the dataset and release resources used by it.
try {
diff --git a/java/examples/datasets/JavaDatasetExample.sh.in b/java/examples/datasets/JavaDatasetExample.sh.in
index f29739a..9a5ddd8 100644
--- a/java/examples/datasets/JavaDatasetExample.sh.in
+++ b/java/examples/datasets/JavaDatasetExample.sh.in
@@ -57,8 +57,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
"
LIST_JAR_TESTFILES="
-$HDFLIB_HOME/slf4j-api-1.7.25.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
+$HDFLIB_HOME/slf4j-api-1.7.33.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
"
LIST_DATA_FILES="
$HDFTEST_HOME/../testfiles/examples.datasets.H5Ex_D_Alloc.txt
@@ -220,7 +220,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/datasets/Makefile.am b/java/examples/datasets/Makefile.am
index 41a914b..8b71ced 100644
--- a/java/examples/datasets/Makefile.am
+++ b/java/examples/datasets/Makefile.am
@@ -27,7 +27,7 @@ classes:
pkgpath = examples/datasets
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)datasets.jar
diff --git a/java/examples/datatypes/H5Ex_T_ObjectReference.java b/java/examples/datatypes/H5Ex_T_ObjectReference.java
index 38536b8..b0f98de 100644
--- a/java/examples/datatypes/H5Ex_T_ObjectReference.java
+++ b/java/examples/datatypes/H5Ex_T_ObjectReference.java
@@ -70,7 +70,7 @@ public class H5Ex_T_ObjectReference {
long group_id = HDF5Constants.H5I_INVALID_HID;
long dataset_id = HDF5Constants.H5I_INVALID_HID;
long[] dims = { DIM0 };
- byte[][] dset_data = new byte[DIM0][8];
+ byte[][] dset_data = new byte[DIM0][HDF5Constants.H5R_REF_BUF_SIZE];
// Create a new file using default properties.
try {
@@ -111,50 +111,58 @@ public class H5Ex_T_ObjectReference {
e.printStackTrace();
}
- // Create references to the previously created objects. Passing -1
- // as space_id causes this parameter to be ignored. Other values
- // besides valid dataspaces result in an error.
try {
if (file_id >= 0) {
- byte rbuf0[] = H5.H5Rcreate(file_id, GROUPNAME, HDF5Constants.H5R_OBJECT, -1);
- byte rbuf1[] = H5.H5Rcreate(file_id, DATASETNAME2, HDF5Constants.H5R_OBJECT, -1);
- for (int indx = 0; indx < 8; indx++) {
- dset_data[0][indx] = rbuf0[indx];
- dset_data[1][indx] = rbuf1[indx];
+ try {
+ dset_data[0] = H5.H5Rcreate_object(file_id, GROUPNAME, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ }
+
+ try {
+ dset_data[1] = H5.H5Rcreate_object(file_id, DATASETNAME2, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
}
}
+
+ // Create dataspace. Setting maximum size to NULL sets the maximum
+ // size to be the current size.
+ try {
+ filespace_id = H5.H5Screate_simple(RANK, dims, null);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // Create the dataset.
+ try {
+ if ((file_id >= 0) && (filespace_id >= 0))
+ dataset_id = H5.H5Dcreate(file_id, DATASETNAME, HDF5Constants.H5T_STD_REF, filespace_id,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // Write the object references to it.
+ try {
+ if (dataset_id >= 0)
+ H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
}
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Create dataspace. Setting maximum size to NULL sets the maximum
- // size to be the current size.
- try {
- filespace_id = H5.H5Screate_simple(RANK, dims, null);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Create the dataset.
- try {
- if ((file_id >= 0) && (filespace_id >= 0))
- dataset_id = H5.H5Dcreate(file_id, DATASETNAME, HDF5Constants.H5T_STD_REF_OBJ, filespace_id,
- HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Write the object references to it.
- try {
- if (dataset_id >= 0)
- H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF_OBJ, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
- HDF5Constants.H5P_DEFAULT, dset_data);
+ catch (Exception ex) {
+ ex.printStackTrace();
}
- catch (Exception e) {
- e.printStackTrace();
+ finally {
+ try {H5.H5Rdestroy(dset_data[1]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(dset_data[0]);} catch (Exception ex) {}
}
// End access to the dataset and release resources used by it.
@@ -192,140 +200,91 @@ public class H5Ex_T_ObjectReference {
int object_type = -1;
long object_id = HDF5Constants.H5I_INVALID_HID;
long[] dims = { DIM0 };
- byte[][] dset_data;
+ byte[][] dset_data = new byte[DIM0][HDF5Constants.H5R_REF_BUF_SIZE];
// Open an existing file.
try {
file_id = H5.H5Fopen(FILENAME, HDF5Constants.H5F_ACC_RDONLY, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- // Open an existing dataset.
- try {
- if (file_id >= 0)
- dataset_id = H5.H5Dopen(file_id, DATASETNAME, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Get dataspace and allocate memory for read buffer.
- try {
- if (dataset_id >= 0)
- dataspace_id = H5.H5Dget_space(dataset_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- try {
- if (dataspace_id >= 0)
- H5.H5Sget_simple_extent_dims(dataspace_id, dims, null);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Allocate array of pointers to two-dimensional arrays (the
- // elements of the dataset.
- dset_data = new byte[(int)dims[0]][8];
-
- // Read the data using the default properties.
- try {
- if (dataset_id >= 0) {
- H5.H5Dread(dataset_id, HDF5Constants.H5T_STD_REF_OBJ, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
- HDF5Constants.H5P_DEFAULT, dset_data);
- }
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Output the data to the screen.
- for (int indx = 0; indx < dims[0]; indx++) {
- System.out.println(DATASETNAME + "[" + indx + "]:");
- System.out.print(" ->");
- // Open the referenced object, get its name and type.
+ // Open an existing dataset.
try {
- if (dataset_id >= 0) {
- object_id = H5.H5Rdereference(dataset_id, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5R_OBJECT, dset_data[indx]);
- object_type = H5.H5Rget_obj_type(dataset_id, HDF5Constants.H5R_OBJECT, dset_data[indx]);
- }
- String obj_name = null;
- if (object_type >= 0) {
- // Get the length of the name and retrieve the name.
- obj_name = H5.H5Iget_name(object_id);
- }
- if ((object_id >= 0) && (object_type >= -1)) {
- switch (H5G_obj.get(object_type)) {
- case H5G_GROUP:
- System.out.print("H5G_GROUP");
- try {
- if (object_id >= 0)
- H5.H5Gclose(object_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- break;
- case H5G_DATASET:
- System.out.print("H5G_DATASET");
+ dataset_id = H5.H5Dopen(file_id, DATASETNAME, HDF5Constants.H5P_DEFAULT);
+
+ try {
+ // Get dataspace and allocate memory for read buffer.
+ dataspace_id = H5.H5Dget_space(dataset_id);
+ H5.H5Sget_simple_extent_dims(dataspace_id, dims, null);
+
+ // Read data.
+ H5.H5Dread(dataset_id, HDF5Constants.H5T_STD_REF, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+
+ // Output the data to the screen.
+ for (int indx = 0; indx < dims[0]; indx++) {
+ System.out.println(DATASETNAME + "[" + indx + "]:");
+ System.out.print(" ->");
+ // Open the referenced object, get its name and type.
try {
- if (object_id >= 0)
- H5.H5Dclose(object_id);
+ object_id = H5.H5Ropen_object(dset_data[indx], HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ try {
+ object_type = H5.H5Rget_obj_type3(dset_data[indx], HDF5Constants.H5R_OBJECT);
+ String obj_name = null;
+ if (object_type >= 0) {
+ // Get the name.
+ obj_name = H5.H5Iget_name(object_id);
+ }
+ if ((object_id >= 0) && (object_type >= -1)) {
+ switch (H5G_obj.get(object_type)) {
+ case H5G_GROUP:
+ System.out.print("H5G_GROUP");
+ break;
+ case H5G_DATASET:
+ System.out.print("H5G_DATASET");
+ break;
+ case H5G_TYPE:
+ System.out.print("H5G_TYPE");
+ break;
+ default:
+ System.out.print("UNHANDLED");
+ }
+ }
+ // Print the name.
+ System.out.println(": " + obj_name);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+ finally {
+ try {H5.H5Oclose(object_id);} catch (Exception e) {}
+ }
}
- catch (Exception e) {
- e.printStackTrace();
+ catch (Exception e4) {
+ e4.printStackTrace();
}
- break;
- case H5G_TYPE:
- System.out.print("H5G_TYPE");
- try {
- if (object_id >= 0)
- H5.H5Tclose(object_id);
- }
- catch (Exception e) {
- e.printStackTrace();
+ finally {
+ try {H5.H5Rdestroy(dset_data[indx]);} catch (Exception e4) {}
}
- break;
- default:
- System.out.print("UNHANDLED");
- }
+ } // end for
+ }
+ catch (Exception e3) {
+ e3.printStackTrace();
+ }
+ finally {
+ try {H5.H5Sclose(dataspace_id);} catch (Exception e3) {}
}
- // Print the name.
- System.out.println(": " + obj_name);
}
- catch (Exception e) {
- e.printStackTrace();
+ catch (Exception e2) {
+ e2.printStackTrace();
+ }
+ finally {
+ try {H5.H5Dclose(dataset_id);} catch (Exception e2) {}
}
}
-
- // End access to the dataset and release resources used by it.
- try {
- if (dataspace_id >= 0)
- H5.H5Sclose(dataspace_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- try {
- if (dataset_id >= 0)
- H5.H5Dclose(dataset_id);
- }
- catch (Exception e) {
- e.printStackTrace();
+ catch (Exception e1) {
+ e1.printStackTrace();
}
-
- // Close the file.
- try {
- if (file_id >= 0)
- H5.H5Fclose(file_id);
- }
- catch (Exception e) {
- e.printStackTrace();
+ finally {
+ try {H5.H5Fclose(file_id);} catch (Exception e1) {}
}
}
diff --git a/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java b/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
index b38b0a0..f61ae0d 100644
--- a/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
@@ -18,7 +18,6 @@
Next, it reopens the file, dereferences the references,
and outputs the names of their targets to the screen.
************************************************************/
-
package examples.datatypes;
import java.util.EnumSet;
@@ -72,7 +71,7 @@ public class H5Ex_T_ObjectReferenceAttribute {
long dataset_id = HDF5Constants.H5I_INVALID_HID;
long attribute_id = HDF5Constants.H5I_INVALID_HID;
long[] dims = { DIM0 };
- byte[][] dset_data = new byte[DIM0][8];
+ byte[][] dset_data = new byte[DIM0][HDF5Constants.H5R_REF_BUF_SIZE];
// Create a new file using default properties.
try {
@@ -86,7 +85,7 @@ public class H5Ex_T_ObjectReferenceAttribute {
// Create dataset with a scalar dataspace.
try {
dataspace_id = H5.H5Screate(HDF5Constants.H5S_SCALAR);
- if (dataspace_id >= 0) {
+ if ((file_id >= 0) && (dataspace_id >= 0)) {
dataset_id = H5.H5Dcreate(file_id, DATASETNAME2, HDF5Constants.H5T_STD_I32LE, dataspace_id,
HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
if (dataset_id >= 0)
@@ -113,64 +112,72 @@ public class H5Ex_T_ObjectReferenceAttribute {
e.printStackTrace();
}
- // Create references to the previously created objects. Passing -1
- // as space_id causes this parameter to be ignored. Other values
- // besides valid dataspaces result in an error.
try {
if (file_id >= 0) {
- byte rbuf0[] = H5.H5Rcreate(file_id, GROUPNAME, HDF5Constants.H5R_OBJECT, -1);
- byte rbuf1[] = H5.H5Rcreate(file_id, DATASETNAME2, HDF5Constants.H5R_OBJECT, -1);
- for (int indx = 0; indx < 8; indx++) {
- dset_data[0][indx] = rbuf0[indx];
- dset_data[1][indx] = rbuf1[indx];
+ try {
+ dset_data[0] = H5.H5Rcreate_object(file_id, GROUPNAME, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ }
+
+ try {
+ dset_data[1] = H5.H5Rcreate_object(file_id, DATASETNAME2, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
}
}
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- // Create dataset with a scalar dataspace to serve as the parent
- // for the attribute.
- try {
- dataspace_id = H5.H5Screate(HDF5Constants.H5S_SCALAR);
- if (dataspace_id >= 0) {
- dataset_id = H5.H5Dcreate(file_id, DATASETNAME, HDF5Constants.H5T_STD_I32LE, dataspace_id,
- HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- H5.H5Sclose(dataspace_id);
- dataspace_id = HDF5Constants.H5I_INVALID_HID;
+ // Create dataset with a scalar dataspace to serve as the parent
+ // for the attribute.
+ try {
+ dataspace_id = H5.H5Screate(HDF5Constants.H5S_SCALAR);
+ if (dataspace_id >= 0) {
+ dataset_id = H5.H5Dcreate(file_id, DATASETNAME, HDF5Constants.H5T_STD_I32LE, dataspace_id,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ H5.H5Sclose(dataspace_id);
+ dataspace_id = HDF5Constants.H5I_INVALID_HID;
+ }
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // Create dataspace. Setting maximum size to NULL sets the maximum
+ // size to be the current size.
+ try {
+ dataspace_id = H5.H5Screate_simple(RANK, dims, null);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // Create the attribute and write the array data to it.
+ try {
+ if ((dataset_id >= 0) && (dataspace_id >= 0))
+ attribute_id = H5.H5Acreate(dataset_id, ATTRIBUTENAME, HDF5Constants.H5T_STD_REF, dataspace_id,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ // Write the dataset.
+ try {
+ if (attribute_id >= 0)
+ H5.H5Awrite(attribute_id, HDF5Constants.H5T_STD_REF, dset_data);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
}
}
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Create dataspace. Setting maximum size to NULL sets the maximum
- // size to be the current size.
- try {
- dataspace_id = H5.H5Screate_simple(RANK, dims, null);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Create the attribute and write the array data to it.
- try {
- if ((dataset_id >= 0) && (dataspace_id >= 0))
- attribute_id = H5.H5Acreate(dataset_id, ATTRIBUTENAME, HDF5Constants.H5T_STD_REF_OBJ, dataspace_id,
- HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Write the dataset.
- try {
- if (attribute_id >= 0)
- H5.H5Awrite(attribute_id, HDF5Constants.H5T_STD_REF_OBJ, dset_data);
+ catch (Exception ex) {
+ ex.printStackTrace();
}
- catch (Exception e) {
- e.printStackTrace();
+ finally {
+ try {H5.H5Rdestroy(dset_data[1]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(dset_data[0]);} catch (Exception ex) {}
}
// End access to the dataset and release resources used by it.
@@ -190,7 +197,6 @@ public class H5Ex_T_ObjectReferenceAttribute {
e.printStackTrace();
}
- // Terminate access to the data space.
try {
if (dataspace_id >= 0)
H5.H5Sclose(dataspace_id);
@@ -207,7 +213,6 @@ public class H5Ex_T_ObjectReferenceAttribute {
catch (Exception e) {
e.printStackTrace();
}
-
}
private static void ReadDataset() {
@@ -218,158 +223,102 @@ public class H5Ex_T_ObjectReferenceAttribute {
int object_type = -1;
long object_id = HDF5Constants.H5I_INVALID_HID;
long[] dims = { DIM0 };
- byte[][] dset_data;
+ byte[][] dset_data = new byte[DIM0][HDF5Constants.H5R_REF_BUF_SIZE];
// Open an existing file.
try {
file_id = H5.H5Fopen(FILENAME, HDF5Constants.H5F_ACC_RDONLY, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- // Open an existing dataset.
- try {
- if (file_id >= 0)
- dataset_id = H5.H5Dopen(file_id, DATASETNAME, HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- try {
- if (dataset_id >= 0)
- attribute_id = H5.H5Aopen_by_name(dataset_id, ".", ATTRIBUTENAME, HDF5Constants.H5P_DEFAULT,
- HDF5Constants.H5P_DEFAULT);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Get dataspace and allocate memory for read buffer.
- try {
- if (attribute_id >= 0)
- dataspace_id = H5.H5Aget_space(attribute_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- try {
- if (dataspace_id >= 0)
- H5.H5Sget_simple_extent_dims(dataspace_id, dims, null);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Allocate array of pointers to two-dimensional arrays (the
- // elements of the dataset.
- dset_data = new byte[(int) dims[0]][8];
-
- // Read data.
- try {
- if (attribute_id >= 0)
- H5.H5Aread(attribute_id, HDF5Constants.H5T_STD_REF_OBJ, dset_data);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Output the data to the screen.
- for (int indx = 0; indx < dims[0]; indx++) {
- System.out.println(ATTRIBUTENAME + "[" + indx + "]:");
- System.out.print(" ->");
- // Open the referenced object, get its name and type.
+ // Open an existing dataset.
try {
- if (dataset_id >= 0) {
- object_id = H5.H5Rdereference(dataset_id, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5R_OBJECT, dset_data[indx]);
- object_type = H5.H5Rget_obj_type(dataset_id, HDF5Constants.H5R_OBJECT, dset_data[indx]);
+ dataset_id = H5.H5Dopen(file_id, DATASETNAME, HDF5Constants.H5P_DEFAULT);
+
+ try {
+ attribute_id = H5.H5Aopen_by_name(dataset_id, ".", ATTRIBUTENAME, HDF5Constants.H5P_DEFAULT,
+ HDF5Constants.H5P_DEFAULT);
+
+ // Get dataspace and allocate memory for read buffer.
+ try {
+ dataspace_id = H5.H5Aget_space(attribute_id);
+ H5.H5Sget_simple_extent_dims(dataspace_id, dims, null);
+
+ // Read data.
+ H5.H5Aread(attribute_id, HDF5Constants.H5T_STD_REF, dset_data);
+
+ // Output the data to the screen.
+ for (int indx = 0; indx < dims[0]; indx++) {
+ System.out.println(ATTRIBUTENAME + "[" + indx + "]:");
+ System.out.print(" ->");
+ // Open the referenced object, get its name and type.
+ try {
+ object_id = H5.H5Ropen_object(dset_data[indx], HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ try {
+ object_type = H5.H5Rget_obj_type3(dset_data[indx], HDF5Constants.H5R_OBJECT);
+ String obj_name = null;
+ if (object_type >= 0) {
+ // Get the name.
+ obj_name = H5.H5Iget_name(object_id);
+ }
+ if ((object_id >= 0) && (object_type >= -1)) {
+ switch (H5G_obj.get(object_type)) {
+ case H5G_GROUP:
+ System.out.print("H5G_GROUP");
+ break;
+ case H5G_DATASET:
+ System.out.print("H5G_DATASET");
+ break;
+ case H5G_TYPE:
+ System.out.print("H5G_TYPE");
+ break;
+ default:
+ System.out.print("UNHANDLED");
+ }
+ }
+ // Print the name.
+ System.out.println(": " + obj_name);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+ finally {
+ try {H5.H5Oclose(object_id);} catch (Exception e) {}
+ }
+ }
+ catch (Exception e5) {
+ e5.printStackTrace();
+ }
+ finally {
+ try {H5.H5Rdestroy(dset_data[indx]);} catch (Exception e5) {}
+ }
+ } // end for
+ }
+ catch (Exception e4) {
+ e4.printStackTrace();
+ }
+ finally {
+ try {H5.H5Sclose(dataspace_id);} catch (Exception e3) {}
+ }
}
- String obj_name = null;
- if (object_type >= 0) {
- // Get the length of the name and retrieve the name.
- obj_name = H5.H5Iget_name(object_id);
+ catch (Exception e3) {
+ e3.printStackTrace();
}
- if ((object_id >= 0) && (object_type >= -1)) {
- switch (H5G_obj.get(object_type)) {
- case H5G_GROUP:
- System.out.print("H5G_GROUP");
- try {
- if (object_id >= 0)
- H5.H5Gclose(object_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- break;
- case H5G_DATASET:
- System.out.print("H5G_DATASET");
- try {
- if (object_id >= 0)
- H5.H5Dclose(object_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- break;
- case H5G_TYPE:
- System.out.print("H5G_TYPE");
- try {
- if (object_id >= 0)
- H5.H5Tclose(object_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
- break;
- default:
- System.out.print("UNHANDLED");
- }
+ finally {
+ try {H5.H5Aclose(attribute_id);} catch (Exception e4) {}
}
- // Print the name.
- System.out.println(": " + obj_name);
}
- catch (Exception e) {
- e.printStackTrace();
+ catch (Exception e2) {
+ e2.printStackTrace();
+ }
+ finally {
+ try {H5.H5Dclose(dataset_id);} catch (Exception e2) {}
}
}
-
- // End access to the dataset and release resources used by it.
- try {
- if (attribute_id >= 0)
- H5.H5Aclose(attribute_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- try {
- if (dataset_id >= 0)
- H5.H5Dclose(dataset_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
- // Terminate access to the data space.
- try {
- if (dataspace_id >= 0)
- H5.H5Sclose(dataspace_id);
+ catch (Exception e1) {
+ e1.printStackTrace();
}
- catch (Exception e) {
- e.printStackTrace();
+ finally {
+ try {H5.H5Fclose(file_id);} catch (Exception e1) {}
}
-
- // Close the file.
- try {
- if (file_id >= 0)
- H5.H5Fclose(file_id);
- }
- catch (Exception e) {
- e.printStackTrace();
- }
-
}
public static void main(String[] args) {
diff --git a/java/examples/datatypes/JavaDatatypeExample.sh.in b/java/examples/datatypes/JavaDatatypeExample.sh.in
index e26d8c0..c6f5dbc 100644
--- a/java/examples/datatypes/JavaDatatypeExample.sh.in
+++ b/java/examples/datatypes/JavaDatatypeExample.sh.in
@@ -54,8 +54,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
"
LIST_JAR_TESTFILES="
-$HDFLIB_HOME/slf4j-api-1.7.25.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
+$HDFLIB_HOME/slf4j-api-1.7.33.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
"
LIST_DATA_FILES="
$HDFTEST_HOME/../testfiles/examples.datatypes.H5Ex_T_Array.txt
@@ -216,7 +216,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/datatypes/Makefile.am b/java/examples/datatypes/Makefile.am
index 90790f7..55ff91f 100644
--- a/java/examples/datatypes/Makefile.am
+++ b/java/examples/datatypes/Makefile.am
@@ -27,7 +27,7 @@ classes:
pkgpath = examples/datatypes
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)datatypes.jar
diff --git a/java/examples/groups/JavaGroupExample.sh.in b/java/examples/groups/JavaGroupExample.sh.in
index 3b0e9d1..86f90bf 100644
--- a/java/examples/groups/JavaGroupExample.sh.in
+++ b/java/examples/groups/JavaGroupExample.sh.in
@@ -56,8 +56,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
"
LIST_JAR_TESTFILES="
-$HDFLIB_HOME/slf4j-api-1.7.25.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
+$HDFLIB_HOME/slf4j-api-1.7.33.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
"
LIST_ITER_FILES="
$HDFTEST_HOME/h5ex_g_iterate.h5
@@ -255,7 +255,7 @@ COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
COPY_ITERFILES_TO_BLDITERDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/groups/Makefile.am b/java/examples/groups/Makefile.am
index bfde9ae..c520860 100644
--- a/java/examples/groups/Makefile.am
+++ b/java/examples/groups/Makefile.am
@@ -27,7 +27,7 @@ classes:
pkgpath = examples/groups
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)groups.jar
diff --git a/java/examples/intro/JavaIntroExample.sh.in b/java/examples/intro/JavaIntroExample.sh.in
index db741e5..a095f27 100644
--- a/java/examples/intro/JavaIntroExample.sh.in
+++ b/java/examples/intro/JavaIntroExample.sh.in
@@ -54,8 +54,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
$top_builddir/java/src/$JARFILE
"
LIST_JAR_TESTFILES="
-$HDFLIB_HOME/slf4j-api-1.7.25.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
+$HDFLIB_HOME/slf4j-api-1.7.33.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
"
LIST_DATA_FILES="
$HDFTEST_HOME/../testfiles/examples.intro.H5_CreateDataset.txt
@@ -205,7 +205,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
diff --git a/java/examples/intro/Makefile.am b/java/examples/intro/Makefile.am
index 7d1aeab..741f122 100644
--- a/java/examples/intro/Makefile.am
+++ b/java/examples/intro/Makefile.am
@@ -27,7 +27,7 @@ classes:
pkgpath = examples/intro
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)intro.jar
diff --git a/java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt b/java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt
index 4d4b5d6..9025ce2 100644
--- a/java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt
+++ b/java/examples/testfiles/examples.datasets.H5Ex_D_Sofloat.txt
@@ -1,6 +1,6 @@
-Maximum value in write buffer is: 106.66666666666667
-Minimum value in write buffer is: 1.7692307692307692
+Maximum value in write buffer is: 106.666667
+Minimum value in write buffer is: 1.769231
Filter type is: H5Z_FILTER_SCALEOFFSET
-Maximum value in DS1 is: 106.66169811320755
-Minimum value in DS1 is: 1.7692307692307692
+Maximum value in DS1 is: 106.661698
+Minimum value in DS1 is: 1.769231
diff --git a/java/lib/ext/slf4j-nop-1.7.25.jar b/java/lib/ext/slf4j-nop-1.7.25.jar
deleted file mode 100644
index 78c7295..0000000
--- a/java/lib/ext/slf4j-nop-1.7.25.jar
+++ /dev/null
Binary files differ
diff --git a/java/lib/ext/slf4j-nop-1.7.33.jar b/java/lib/ext/slf4j-nop-1.7.33.jar
new file mode 100644
index 0000000..aa8fc09
--- /dev/null
+++ b/java/lib/ext/slf4j-nop-1.7.33.jar
Binary files differ
diff --git a/java/lib/ext/slf4j-simple-1.7.25.jar b/java/lib/ext/slf4j-simple-1.7.25.jar
deleted file mode 100644
index b29ca12..0000000
--- a/java/lib/ext/slf4j-simple-1.7.25.jar
+++ /dev/null
Binary files differ
diff --git a/java/lib/ext/slf4j-simple-1.7.33.jar b/java/lib/ext/slf4j-simple-1.7.33.jar
new file mode 100644
index 0000000..ebf4846
--- /dev/null
+++ b/java/lib/ext/slf4j-simple-1.7.33.jar
Binary files differ
diff --git a/java/lib/slf4j-api-1.7.25.jar b/java/lib/slf4j-api-1.7.25.jar
deleted file mode 100644
index 7e62f13..0000000
--- a/java/lib/slf4j-api-1.7.25.jar
+++ /dev/null
Binary files differ
diff --git a/java/lib/slf4j-api-1.7.33.jar b/java/lib/slf4j-api-1.7.33.jar
new file mode 100644
index 0000000..db3730d
--- /dev/null
+++ b/java/lib/slf4j-api-1.7.33.jar
Binary files differ
diff --git a/java/src/Makefile.am b/java/src/Makefile.am
index a0abb32..5bb72ad 100644
--- a/java/src/Makefile.am
+++ b/java/src/Makefile.am
@@ -36,7 +36,7 @@ jarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
hdf5_javadir = $(libdir)
pkgpath = hdf/hdf5lib
-CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$$CLASSPATH
AM_JAVACFLAGS = $(H5_JAVACFLAGS) -deprecation
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index 49a539b..3180ede 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -216,7 +216,7 @@ import hdf.hdf5lib.structs.H5O_token_t;
* exception handlers to print out the HDF-5 error stack.
* <hr>
*
- * @version HDF5 1.13.1 <BR>
+ * @version HDF5 1.13.2 <BR>
* <b>See also: <a href ="./hdf.hdf5lib.HDFArray.html"> hdf.hdf5lib.HDFArray</a> </b><BR>
* <a href ="./hdf.hdf5lib.HDF5Constants.html"> hdf.hdf5lib.HDF5Constants</a><BR>
* <a href ="./hdf.hdf5lib.HDF5CDataTypes.html"> hdf.hdf5lib.HDF5CDataTypes</a><BR>
@@ -239,7 +239,7 @@ public class H5 implements java.io.Serializable {
*
* Make sure to update the versions number when a different library is used.
*/
- public final static int LIB_VERSION[] = { 1, 13, 1 };
+ public final static int LIB_VERSION[] = { 1, 13, 2 };
/**
* add system property to load library by path
diff --git a/java/src/hdf/hdf5lib/package-info.java b/java/src/hdf/hdf5lib/package-info.java
index 7edfcb2..2863f5a 100644
--- a/java/src/hdf/hdf5lib/package-info.java
+++ b/java/src/hdf/hdf5lib/package-info.java
@@ -171,4 +171,4 @@
*
* <b>See also: <a href="http://hdfgroup.org/HDF5/"> http://hdfgroup.org/HDF5"</a></b>
**/
-package hdf.hdf5lib; \ No newline at end of file
+package hdf.hdf5lib;
diff --git a/java/src/jni/h5util.c b/java/src/jni/h5util.c
index 02e99af..2de5d87 100644
--- a/java/src/jni/h5util.c
+++ b/java/src/jni/h5util.c
@@ -182,7 +182,7 @@ size_t
h5str_convert(JNIEnv *env, char **in_str, hid_t container, hid_t tid, void *out_buf, size_t out_buf_offset)
{
unsigned char *ucptr = NULL;
- static char fmt_llong[8], fmt_ullong[8];
+ char fmt_llong[8], fmt_ullong[8];
H5T_class_t tclass = H5T_NO_CLASS;
const char delimiter[] = " ," H5_COMPOUND_BEGIN_INDICATOR H5_COMPOUND_END_INDICATOR
H5_ARRAY_BEGIN_INDICATOR H5_ARRAY_END_INDICATOR H5_VLEN_BEGIN_INDICATOR H5_VLEN_END_INDICATOR;
@@ -211,10 +211,10 @@ h5str_convert(JNIEnv *env, char **in_str, hid_t container, hid_t tid, void *out_
/* Build default formats for long long types */
if (!fmt_llong[0]) {
- if (HDsprintf(fmt_llong, "%%%sd", H5_PRINTF_LL_WIDTH) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_convert: HDsprintf failure");
- if (HDsprintf(fmt_ullong, "%%%su", H5_PRINTF_LL_WIDTH) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_convert: HDsprintf failure");
+ if (HDsnprintf(fmt_llong, sizeof(fmt_llong), "%%%sd", H5_PRINTF_LL_WIDTH) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_convert: HDsnprintf failure");
+ if (HDsnprintf(fmt_ullong, sizeof(fmt_ullong), "%%%su", H5_PRINTF_LL_WIDTH) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_convert: HDsnprintf failure");
} /* end if */
switch (tclass) {
@@ -759,7 +759,7 @@ size_t
h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *in_buf, int expand_data)
{
unsigned char *ucptr = (unsigned char *)in_buf;
- static char fmt_llong[8], fmt_ullong[8];
+ char fmt_llong[8], fmt_ullong[8];
H5T_class_t tclass = H5T_NO_CLASS;
size_t typeSize = 0;
H5T_sign_t nsign = H5T_SGN_ERROR;
@@ -794,11 +794,12 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
HDmemcpy(&tmp_float, cptr, sizeof(float));
- if (NULL == (this_str = (char *)HDmalloc(25)))
+ size_t this_len = 25;
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY, "h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%g", tmp_float) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%g", tmp_float) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
break;
}
@@ -808,11 +809,12 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
HDmemcpy(&tmp_double, cptr, sizeof(double));
- if (NULL == (this_str = (char *)HDmalloc(25)))
+ size_t this_len = 25;
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY, "h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%g", tmp_double) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%g", tmp_double) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
break;
}
@@ -822,11 +824,12 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
HDmemcpy(&tmp_ldouble, cptr, sizeof(long double));
- if (NULL == (this_str = (char *)HDmalloc(27)))
+ size_t this_len = 27;
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY, "h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%Lg", tmp_ldouble) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%Lg", tmp_ldouble) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
break;
}
@@ -888,25 +891,26 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
unsigned char tmp_uchar = 0;
char tmp_char = 0;
+ size_t this_len = 7;
if (H5T_SGN_NONE == nsign) {
HDmemcpy(&tmp_uchar, cptr, sizeof(unsigned char));
- if (NULL == (this_str = (char *)HDmalloc(7)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%hhu", tmp_uchar) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%hhu", tmp_uchar) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
HDmemcpy(&tmp_char, cptr, sizeof(char));
- if (NULL == (this_str = (char *)HDmalloc(7)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%hhd", tmp_char) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%hhd", tmp_char) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -916,25 +920,26 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
unsigned short tmp_ushort = 0;
short tmp_short = 0;
+ size_t this_len = 9;
if (H5T_SGN_NONE == nsign) {
HDmemcpy(&tmp_ushort, cptr, sizeof(unsigned short));
- if (NULL == (this_str = (char *)HDmalloc(9)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%hu", tmp_ushort) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%hu", tmp_ushort) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
HDmemcpy(&tmp_short, cptr, sizeof(short));
- if (NULL == (this_str = (char *)HDmalloc(9)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%hd", tmp_short) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%hd", tmp_short) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -944,25 +949,26 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
unsigned int tmp_uint = 0;
int tmp_int = 0;
+ size_t this_len = 14;
if (H5T_SGN_NONE == nsign) {
HDmemcpy(&tmp_uint, cptr, sizeof(unsigned int));
- if (NULL == (this_str = (char *)HDmalloc(14)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%u", tmp_uint) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%u", tmp_uint) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
HDmemcpy(&tmp_int, cptr, sizeof(int));
- if (NULL == (this_str = (char *)HDmalloc(14)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%d", tmp_int) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%d", tmp_int) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -972,25 +978,26 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
unsigned long tmp_ulong = 0;
long tmp_long = 0;
+ size_t this_len = 23;
if (H5T_SGN_NONE == nsign) {
HDmemcpy(&tmp_ulong, cptr, sizeof(unsigned long));
- if (NULL == (this_str = (char *)HDmalloc(23)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%lu", tmp_ulong) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%lu", tmp_ulong) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
HDmemcpy(&tmp_long, cptr, sizeof(long));
- if (NULL == (this_str = (char *)HDmalloc(23)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%ld", tmp_long) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%ld", tmp_long) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -1001,25 +1008,26 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
unsigned long long tmp_ullong = 0;
long long tmp_llong = 0;
+ size_t this_len = 25;
if (H5T_SGN_NONE == nsign) {
HDmemcpy(&tmp_ullong, cptr, sizeof(unsigned long long));
- if (NULL == (this_str = (char *)HDmalloc(25)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, fmt_ullong, tmp_ullong) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, fmt_ullong, tmp_ullong) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
HDmemcpy(&tmp_llong, cptr, sizeof(long long));
- if (NULL == (this_str = (char *)HDmalloc(25)))
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY,
"h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, fmt_llong, tmp_llong) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, fmt_llong, tmp_llong) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -1079,17 +1087,18 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
else {
size_t i;
- if (NULL == (this_str = (char *)HDmalloc(4 * (typeSize + 1))))
+ size_t this_len = 4 * (typeSize + 1);
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY, "h5str_sprintf: failed to allocate string buffer");
if (1 == typeSize) {
- if (HDsprintf(this_str, "%#02x", ucptr[0]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%#02x", ucptr[0]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
for (i = 0; i < typeSize; i++)
- if (HDsprintf(this_str, "%s%02x", i ? ":" : "", ucptr[i]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%s%02x", i ? ":" : "", ucptr[i]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
}
@@ -1134,11 +1143,12 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
else
H5_LIBRARY_ERROR(ENVONLY);
- if (NULL == (this_str = (char *)HDmalloc(14)))
+ size_t this_len = 14;
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(
ENVONLY, "h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%u-", (unsigned)oi.type) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%u-", (unsigned)oi.type) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
if (!h5str_append(out_str, this_str))
H5_ASSERTION_ERROR(ENVONLY, "Unable to append string.");
HDfree(this_str);
@@ -1150,11 +1160,13 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
H5Otoken_to_str(tid, &oi.token, &token_str);
- if (NULL == (this_str = (char *)HDmalloc(64 + strlen(token_str) + 1)))
+ size_t that_len = 64 + strlen(token_str) + 1;
+ if (NULL == (this_str = HDmalloc(that_len)))
H5_OUT_OF_MEMORY_ERROR(
ENVONLY, "h5str_sprintf: failed to allocate string buffer");
- if (HDsprintf(this_str, "%lu:%s", oi.fileno, token_str) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, that_len, "%lu:%s", oi.fileno, token_str) <
+ 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
H5free_memory(token_str);
}
@@ -1307,17 +1319,18 @@ h5str_sprintf(JNIEnv *env, h5str_t *out_str, hid_t container, hid_t tid, void *i
/* All other types get printed as hexadecimal */
- if (NULL == (this_str = (char *)HDmalloc(4 * (typeSize + 1))))
+ size_t this_len = 4 * (typeSize + 1);
+ if (NULL == (this_str = (char *)HDmalloc(this_len)))
H5_OUT_OF_MEMORY_ERROR(ENVONLY, "h5str_sprintf: failed to allocate string buffer");
if (1 == typeSize) {
- if (HDsprintf(this_str, "%#02x", ucptr[0]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%#02x", ucptr[0]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
else {
for (i = 0; i < typeSize; i++)
- if (HDsprintf(this_str, "%s%02x", i ? ":" : "", ucptr[i]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsprintf failure");
+ if (HDsnprintf(this_str, this_len, "%s%02x", i ? ":" : "", ucptr[i]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_sprintf: HDsnprintf failure");
}
break;
@@ -1519,8 +1532,9 @@ h5str_dump_region_blocks(JNIEnv *env, h5str_t *str, hid_t region_space, hid_t re
for (j = 0; j < ndims; j++) {
tmp_str[0] = '\0';
- if (HDsprintf(tmp_str, "%s%lu", j ? "," : "(", (unsigned long)ptdata[i * 2 * ndims + j]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_blocks: HDsprintf failure");
+ if (HDsnprintf(tmp_str, sizeof(tmp_str), "%s%lu", j ? "," : "(",
+ (unsigned long)ptdata[i * 2 * ndims + j]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_blocks: HDsnprintf failure");
if (!h5str_append(str, tmp_str))
H5_ASSERTION_ERROR(ENVONLY, "Unable to append string.");
@@ -1529,9 +1543,9 @@ h5str_dump_region_blocks(JNIEnv *env, h5str_t *str, hid_t region_space, hid_t re
for (j = 0; j < ndims; j++) {
tmp_str[0] = '\0';
- if (HDsprintf(tmp_str, "%s%lu", j ? "," : ")-(",
- (unsigned long)ptdata[i * 2 * ndims + j + ndims]) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_blocks: HDsprintf failure");
+ if (HDsnprintf(tmp_str, sizeof(tmp_str), "%s%lu", j ? "," : ")-(",
+ (unsigned long)ptdata[i * 2 * ndims + j + ndims]) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_blocks: HDsnprintf failure");
if (!h5str_append(str, tmp_str))
H5_ASSERTION_ERROR(ENVONLY, "Unable to append string.");
@@ -1696,8 +1710,9 @@ h5str_dump_region_points(JNIEnv *env, h5str_t *str, hid_t region_space, hid_t re
for (j = 0; j < ndims; j++) {
tmp_str[0] = '\0';
- if (HDsprintf(tmp_str, "%s%lu", j ? "," : "(", (unsigned long)(ptdata[i * ndims + j])) < 0)
- H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_points: HDsprintf failure");
+ if (HDsnprintf(tmp_str, sizeof(tmp_str), "%s%lu", j ? "," : "(",
+ (unsigned long)(ptdata[i * ndims + j])) < 0)
+ H5_JNI_FATAL_ERROR(ENVONLY, "h5str_dump_region_points: HDsnprintf failure");
if (!h5str_append(str, tmp_str))
H5_ASSERTION_ERROR(ENVONLY, "Unable to append string.");
diff --git a/java/test/CMakeLists.txt b/java/test/CMakeLists.txt
index c6cf607..b1186a4 100644
--- a/java/test/CMakeLists.txt
+++ b/java/test/CMakeLists.txt
@@ -39,6 +39,7 @@ set (HDF5_JAVA_TEST_SOURCES
TestH5Oparams
TestH5Obasic
TestH5Ocreate
+ TestH5OcopyOld
TestH5Ocopy
TestH5PL
TestH5VL
diff --git a/java/test/Makefile.am b/java/test/Makefile.am
index ac3b619..4a6785d 100644
--- a/java/test/Makefile.am
+++ b/java/test/Makefile.am
@@ -27,7 +27,7 @@ classes:
pkgpath = test
hdfjarfile = jar$(PACKAGE_TARNAME)-$(PACKAGE_VERSION).jar
-CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-1.7.25.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.25.jar:$$CLASSPATH
+CLASSPATH_ENV=CLASSPATH=.:$(JAVAROOT):$(top_builddir)/java/src/$(hdfjarfile):$(top_srcdir)/java/lib/junit.jar:$(top_srcdir)/java/lib/hamcrest-core.jar:$(top_srcdir)/java/lib/slf4j-api-1.7.33.jar:$(top_srcdir)/java/lib/ext/slf4j-simple-1.7.33.jar:$$CLASSPATH
jarfile = jar$(PACKAGE_TARNAME)test.jar
@@ -71,6 +71,7 @@ noinst_JAVA = \
TestH5Oparams.java \
TestH5Obasic.java \
TestH5Ocreate.java \
+ TestH5OcopyOld.java \
TestH5Ocopy.java \
TestH5PL.java \
TestH5VL.java \
diff --git a/java/test/TestH5.java b/java/test/TestH5.java
index 1f81f09..168f28a 100644
--- a/java/test/TestH5.java
+++ b/java/test/TestH5.java
@@ -287,7 +287,7 @@ public class TestH5 {
*/
@Test
public void testH5get_libversion() {
- int libversion[] = { 1, 13, 1 };
+ int libversion[] = { 1, 13, 2 };
try {
H5.H5get_libversion(libversion);
@@ -326,7 +326,7 @@ public class TestH5 {
*/
@Test
public void testH5check_version() {
- int majnum = 1, minnum = 13, relnum = 1;
+ int majnum = 1, minnum = 13, relnum = 2;
try {
H5.H5check_version(majnum, minnum, relnum);
diff --git a/java/test/TestH5Ocopy.java b/java/test/TestH5Ocopy.java
index b3b1acd..821cad3 100644
--- a/java/test/TestH5Ocopy.java
+++ b/java/test/TestH5Ocopy.java
@@ -136,33 +136,46 @@ public class TestH5Ocopy {
@Test
public void testH5OcopyRefsAttr() {
long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
- byte rbuf0[]=null , rbuf1[] = null;
- byte[] dset_data = new byte[16];
+ byte[][] dset_data = new byte[2][HDF5Constants.H5R_REF_BUF_SIZE];
long attribute_id = HDF5Constants.H5I_INVALID_HID;
-
- try {
- rbuf0 = H5.H5Rcreate(H5fid, "/G1", HDF5Constants.H5R_OBJECT, -1);
- rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
- //System.arraycopy(rbuf0, 0, dset_data, 0, 8);
- System.arraycopy(rbuf1, 0, dset_data, 8, 8);
- }
- catch (Exception ex) {
- fail("testH5OcopyRefsAttr: H5Rcreate failed");
- }
-
try {
- attribute_id = H5.H5Acreate(H5did2, "A1", HDF5Constants.H5T_STD_REF_OBJ, H5dsid, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- assertTrue("testH5OcopyRefsAttr.H5Acreate: ", attribute_id >= 0);
- H5.H5Awrite(attribute_id, HDF5Constants.H5T_STD_REF_OBJ, dset_data);
-
- H5.H5Aclose(attribute_id);
+ try {
+ dset_data[0] = H5.H5Rcreate_object(H5fid, "/G1", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsAttr: H5Rcreate_object " + err);
+ }
+
+ try {
+ dset_data[1] = H5.H5Rcreate_object(H5fid, "DS2", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsAttr: H5Rcreate_object " + err);
+ }
+
+ try {
+ attribute_id = H5.H5Acreate(H5did2, "A1", HDF5Constants.H5T_STD_REF, H5dsid, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsAttr.H5Acreate: ", attribute_id >= 0);
+ H5.H5Awrite(attribute_id, HDF5Constants.H5T_STD_REF, dset_data);
+
+ H5.H5Aclose(attribute_id);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsAttr: H5Awrite failed");
+ }
+ finally {
+ try {H5.H5Aclose(attribute_id);} catch (Exception exx) {}
+ }
}
catch (Exception ex) {
- fail("testH5OcopyRefsAttr: H5Awrite failed");
+ ex.printStackTrace();
}
finally {
- try {H5.H5Aclose(attribute_id);} catch (Exception exx) {}
+ try {H5.H5Rdestroy(dset_data[1]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(dset_data[0]);} catch (Exception ex) {}
}
try {
@@ -181,30 +194,50 @@ public class TestH5Ocopy {
@Test
public void testH5OcopyRefsDatasettodiffFile() {
- byte rbuf1[] = null;
- byte[] dset_data = new byte[16];
+ byte[][] dset_data = new byte[2][HDF5Constants.H5R_REF_BUF_SIZE];
long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
long dataset_id = HDF5Constants.H5I_INVALID_HID;
long H5fid2 = HDF5Constants.H5I_INVALID_HID;
try {
- rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
- System.arraycopy(rbuf1, 0, dset_data, 8, 8);
-
- dataset_id = H5.H5Dcreate(H5fid, "DSREF",
- HDF5Constants.H5T_STD_REF_OBJ, H5dsid,
- HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- assertTrue("testH5OcopyRefsDatasettodiffFile.H5Dcreate: ", dataset_id >= 0);
- H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF_OBJ,
- HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
- HDF5Constants.H5P_DEFAULT, dset_data);
- H5.H5Dclose(dataset_id);
+ try {
+ dset_data[0] = H5.H5Rcreate_object(H5fid, "/G1", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsDatasettodiffFile: H5Rcreate_object " + err);
+ }
+ try {
+ dset_data[1] = H5.H5Rcreate_object(H5fid, "DS2", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsDatasettodiffFile: H5Rcreate_object " + err);
+ }
+
+ try {
+ dataset_id = H5.H5Dcreate(H5fid, "DSREF",
+ HDF5Constants.H5T_STD_REF, H5dsid,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettodiffFile.H5Dcreate: ", dataset_id >= 0);
+ H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF,
+ HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+ H5.H5Dclose(dataset_id);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsDatasettodiffFile: create dataset failed");
+ }
+ finally {
+ try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
+ }
}
catch (Exception ex) {
- fail("testH5OcopyRefsDatasettodiffFile: create dataset failed");
+ ex.printStackTrace();
}
finally {
- try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
+ try {H5.H5Rdestroy(dset_data[0]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(dset_data[1]);} catch (Exception ex) {}
}
try {
@@ -241,123 +274,154 @@ public class TestH5Ocopy {
@Test
public void testH5OcopyRefsDatasettosameFile() {
- byte rbuf0[]=null , rbuf1[] = null;
- byte[] dset_data = new byte[16];
+ byte[][] dset_data = new byte[2][HDF5Constants.H5R_REF_BUF_SIZE];
+ byte[][] read_data = new byte[2][HDF5Constants.H5R_REF_BUF_SIZE];
long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
long dataset_id = HDF5Constants.H5I_INVALID_HID;
long did = HDF5Constants.H5I_INVALID_HID;
int obj_type = -1;
- byte[] read_data = new byte[16];
try {
- rbuf0 = H5.H5Rcreate(H5fid, "/G1", HDF5Constants.H5R_OBJECT, -1);
- rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
- System.arraycopy(rbuf0, 0, dset_data, 0, 8);
- System.arraycopy(rbuf1, 0, dset_data, 8, 8);
-
- //Create a dataset and write object references to it.
- dataset_id = H5.H5Dcreate(H5fid, "DSREF",
- HDF5Constants.H5T_STD_REF_OBJ, H5dsid,
- HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
- assertTrue("testH5OcopyRefsDatasettosameFile.H5Dcreate: ", dataset_id >= 0);
- H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF_OBJ,
- HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
- HDF5Constants.H5P_DEFAULT, dset_data);
- //Close the dataset.
- H5.H5Dclose(dataset_id);
+ try {
+ dset_data[0] = H5.H5Rcreate_object(H5fid, "/G1", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: H5Rcreate_object " + err);
+ }
+
+ try {
+ dset_data[1] = H5.H5Rcreate_object(H5fid, "DS2", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: H5Rcreate_object " + err);
+ }
+
+ try {
+ //Create a dataset and write object references to it.
+ dataset_id = H5.H5Dcreate(H5fid, "DSREF",
+ HDF5Constants.H5T_STD_REF, H5dsid,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettosameFile.H5Dcreate: ", dataset_id >= 0);
+ H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF,
+ HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+ //Close the dataset.
+ H5.H5Dclose(dataset_id);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsDatasettosameFile: create dataset failed");
+ }
+ finally {
+ try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
+ }
}
catch (Exception ex) {
- try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
- fail("testH5OcopyRefsDatasettosameFile: create dataset failed");
+ ex.printStackTrace();
+ }
+ finally {
+ try {H5.H5Rdestroy(dset_data[1]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(dset_data[0]);} catch (Exception ex) {}
}
try {
ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
assertTrue("testH5OcopyRefsDatasettosameFile.H5Pcreate: ", ocp_plist_id >= 0);
H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+ //Perform copy function.
+ try {
+ H5.H5Ocopy(H5fid, "DSREF", H5fid, "CPYREFD", ocp_plist_id, HDF5Constants.H5P_DEFAULT);
+ }
+ catch(Exception ex) {
+ fail("testH5OcopyRefsDatasettosameFile: H5Ocopy failed");
+ }
}
catch (Exception ex) {
- try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
fail("testH5OcopyRefsDatasettosameFile: H5Pset_copy_object failed");
}
-
- //Perform copy function.
- try {
- H5.H5Ocopy(H5fid, "DSREF", H5fid, "CPYREFD", ocp_plist_id, HDF5Constants.H5P_DEFAULT);
- }
- catch(Exception ex) {
+ finally {
try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
- fail("testH5OcopyRefsDatasettosameFile: H5Ocopy failed");
}
- //Open the dataset that has been copied
try {
- did = H5.H5Dopen(H5fid, "DSREF", HDF5Constants.H5P_DEFAULT);
- assertTrue("testH5OcopyRefsDatasettosameFile.H5Dopen: ", did >= 0);
+ //Open the dataset that has been copied
+ try {
+ did = H5.H5Dopen(H5fid, "DSREF", HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettosameFile.H5Dopen: ", did >= 0);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: H5Dopen failed");
+ }
+
+ //Read the dataset object references in the read_data buffer.
+ try {
+ H5.H5Dread(did, HDF5Constants.H5T_STD_REF, HDF5Constants.H5S_ALL,HDF5Constants.H5S_ALL, HDF5Constants.H5P_DEFAULT, read_data);
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: H5Dread failed");
+ }
}
- catch (Exception e) {
- try {H5.H5Dclose(did);} catch (Exception exx) {}
- e.printStackTrace();
- fail("testH5OcopyRefsDatasettosameFile: H5Dopen failed");
+ catch (Exception ex) {
+ ex.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: open and read dataset failed");
+ }
+ finally {
+ try {H5.H5Dclose(did);} catch (Exception ex) {}
}
try {
- //Read the dataset object references in the read_data buffer.
- H5.H5Dread(did, HDF5Constants.H5T_STD_REF_OBJ, HDF5Constants.H5S_ALL,HDF5Constants.H5S_ALL, HDF5Constants.H5P_DEFAULT, read_data);
- System.arraycopy(read_data, 0, rbuf0, 0, 8);
- System.arraycopy(read_data, 8, rbuf1, 0, 8);
-
//Get the type of object the reference points to.
- obj_type = H5.H5Rget_obj_type(H5fid, HDF5Constants.H5R_OBJECT, rbuf1);
+ obj_type = H5.H5Rget_obj_type3(read_data[1], HDF5Constants.H5R_OBJECT);
assertEquals(obj_type, HDF5Constants.H5O_TYPE_DATASET);
-
- obj_type = H5.H5Rget_obj_type(H5fid, HDF5Constants.H5R_OBJECT, rbuf0);
+
+ obj_type = H5.H5Rget_obj_type3(read_data[0], HDF5Constants.H5R_OBJECT);
assertEquals(obj_type, HDF5Constants.H5O_TYPE_GROUP);
}
catch (Exception ex) {
ex.printStackTrace();
}
finally {
- try {H5.H5Dclose(did);} catch (Exception ex) {}
- try {H5.H5Pclose(ocp_plist_id);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(read_data[1]);} catch (Exception ex) {}
+ try {H5.H5Rdestroy(read_data[0]);} catch (Exception ex) {}
+ }
+ }
+
+ @Test
+ public void testH5OcopyNullRef() throws Throwable {
+ final long _pid_ = HDF5Constants.H5P_DEFAULT;
+ long sid = HDF5Constants.H5I_INVALID_HID;
+ long did = HDF5Constants.H5I_INVALID_HID;
+ long aid = HDF5Constants.H5I_INVALID_HID;
+
+ try {
+ sid = H5.H5Screate_simple(1, new long[] {1}, null);
+ assertTrue("testH5OcopyNullRef.H5Screate_simple: ", sid >= 0);
+ did = H5.H5Dcreate(H5fid, "Dataset_with_null_Ref", HDF5Constants.H5T_NATIVE_INT, sid, _pid_, _pid_, _pid_);
+ assertTrue("testH5OcopyNullRef.H5Dcreate: ", did > 0);
+ aid = H5.H5Acreate(did, "Null_Ref", HDF5Constants.H5T_STD_REF, sid, _pid_, _pid_);
+ assertTrue("testH5OcopyNullRef.H5Acreate: ", aid > 0);
}
- }
-
-// @Ignore because of JIRA HDF5-9547
-// @Test(expected = HDF5LibraryException.class)
-// public void testH5OcopyInvalidRef() throws Throwable {
-// final long _pid_ = HDF5Constants.H5P_DEFAULT;
-// long sid = HDF5Constants.H5I_INVALID_HID;
-// long did = HDF5Constants.H5I_INVALID_HID;
-// long aid = HDF5Constants.H5I_INVALID_HID;
-//
-// try {
-// sid = H5.H5Screate_simple(1, new long[] {1}, null);
-// assertTrue("testH5OcopyInvalidRef.H5Screate_simple: ", sid >= 0);
-// did = H5.H5Dcreate(H5fid, "Dataset_with_invalid_Ref", HDF5Constants.H5T_NATIVE_INT, sid, _pid_, _pid_, _pid_);
-// assertTrue("testH5OcopyInvalidRef.H5Dcreate: ", did > 0);
-// aid = H5.H5Acreate(did, "Invalid_Ref", HDF5Constants.H5T_STD_REF_OBJ, sid, _pid_, _pid_);
-// assertTrue("testH5OcopyInvalidRef.H5Acreate: ", aid > 0);
-// H5.H5Awrite(aid, HDF5Constants.H5T_STD_REF_OBJ, new long[]{-1});
-// }
-// catch (Exception ex) {
-// ex.printStackTrace();
-// }
-// finally {
-// try {H5.H5Dclose(did);} catch (Exception exx) {}
-// try {H5.H5Aclose(aid);} catch (Exception exx) {}
-// try {H5.H5Sclose(sid);} catch (Exception exx) {}
-// }
-//
-// long ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
-// assertTrue("testH5OcopyInvalidRef.H5Pcreate: ", ocp_plist_id >= 0);
-// H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
-// try {
-// H5.H5Ocopy(H5fid, "/Dataset_with_invalid_Ref", H5fid, "/Dataset_with_invalid_Ref_cp", ocp_plist_id, _pid_);
-// }
-// finally {
-// try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
-// }
-// }
+ catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ finally {
+ try {H5.H5Dclose(did);} catch (Exception exx) {}
+ try {H5.H5Aclose(aid);} catch (Exception exx) {}
+ try {H5.H5Sclose(sid);} catch (Exception exx) {}
+ }
+
+ long ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+ assertTrue("testH5OcopyNullRef.H5Pcreate: ", ocp_plist_id >= 0);
+ H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+ try {
+ H5.H5Ocopy(H5fid, "/Dataset_with_null_Ref", H5fid, "/Dataset_with_null_Ref_cp", ocp_plist_id, _pid_);
+ }
+ finally {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
+ }
+ }
}
diff --git a/java/test/TestH5OcopyOld.java b/java/test/TestH5OcopyOld.java
new file mode 100644
index 0000000..e547051
--- /dev/null
+++ b/java/test/TestH5OcopyOld.java
@@ -0,0 +1,397 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+package test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+
+import hdf.hdf5lib.H5;
+import hdf.hdf5lib.HDF5Constants;
+import hdf.hdf5lib.exceptions.HDF5Exception;
+import hdf.hdf5lib.exceptions.HDF5LibraryException;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+public class TestH5OcopyOld {
+ @Rule public TestName testname = new TestName();
+ private static final String FILENAME = "testRefsattributeO.h5";
+ private static final int DIM_X = 4;
+ private static final int DIM_Y = 6;
+ long H5fid = HDF5Constants.H5I_INVALID_HID;
+ long H5dsid = HDF5Constants.H5I_INVALID_HID;
+ long H5did1 = HDF5Constants.H5I_INVALID_HID;
+ long H5did2 = HDF5Constants.H5I_INVALID_HID;
+ long H5gcpl = HDF5Constants.H5I_INVALID_HID;
+ long H5gid = HDF5Constants.H5I_INVALID_HID;
+ long H5dsid2 = HDF5Constants.H5I_INVALID_HID;
+ long[] dims = { 2 };
+
+ private final void _deleteFile(String filename) {
+ File file = new File(filename);
+
+ if (file.exists()) {
+ try {
+ file.delete();
+ }
+ catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private final long _createDataset(long fid, long dsid, String name, long dapl) {
+ long did = HDF5Constants.H5I_INVALID_HID;
+ try {
+ did = H5.H5Dcreate(fid, name,
+ HDF5Constants.H5T_STD_I32BE, dsid,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, dapl);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("H5.H5Dcreate: " + err);
+ }
+ assertTrue("TestH5O._createDataset: ",did >= 0);
+
+ return did;
+ }
+
+ private final long _createGroup(long fid, String name) {
+ long gid = HDF5Constants.H5I_INVALID_HID;
+ try {
+ H5gcpl = HDF5Constants.H5P_DEFAULT;
+ gid = H5.H5Gcreate(fid, name, HDF5Constants.H5P_DEFAULT,
+ H5gcpl, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("H5.H5Gcreate: " + err);
+ }
+ assertTrue("TestH5O._createGroup: ",gid >= 0);
+
+ return gid;
+ }
+
+ @Before
+ public void createH5file()
+ throws NullPointerException, HDF5Exception {
+ assertTrue("H5 open ids is 0",H5.getOpenIDCount()==0);
+ System.out.print(testname.getMethodName());
+ try {
+ H5fid = H5.H5Fcreate(FILENAME, HDF5Constants.H5F_ACC_TRUNC,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ H5dsid2 = H5.H5Screate(HDF5Constants.H5S_SCALAR);
+ H5did1 = _createDataset(H5fid, H5dsid2, "DS2", HDF5Constants.H5P_DEFAULT);
+ H5dsid = H5.H5Screate_simple(1, dims, null);
+ H5gid = _createGroup(H5fid, "/G1");
+ H5did2 = _createDataset(H5gid, H5dsid, "DS1", HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("TestH5O.createH5file: " + err);
+ }
+ assertTrue("TestH5O.createH5file: H5.H5Fcreate: ",H5fid >= 0);
+ assertTrue("TestH5O.createH5file: H5.H5Screate_simple: ",H5dsid >= 0);
+ assertTrue("TestH5O.createH5file: H5.H5Gcreate: ",H5gid >= 0);
+
+ H5.H5Fflush(H5fid, HDF5Constants.H5F_SCOPE_LOCAL);
+ }
+
+ @After
+ public void deleteH5file() throws HDF5LibraryException {
+ if (H5gid > 0)
+ try {H5.H5Gclose(H5gid);} catch (Exception ex) {}
+ if (H5did2 > 0)
+ try {H5.H5Dclose(H5did2);} catch (Exception ex) {}
+ if (H5dsid > 0)
+ try {H5.H5Sclose(H5dsid);} catch (Exception ex) {}
+ if (H5dsid2 > 0)
+ try {H5.H5Sclose(H5dsid2);} catch (Exception ex) {}
+ if (H5did1 > 0)
+ try {H5.H5Dclose(H5did1);} catch (Exception ex) {}
+ if (H5fid > 0)
+ try {H5.H5Fclose(H5fid);} catch (Exception ex) {}
+
+ _deleteFile(FILENAME);
+ System.out.println();
+ }
+
+ @Test
+ public void testH5OcopyRefsAttr() {
+ long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
+ byte rbuf0[]=null , rbuf1[] = null;
+ byte[] dset_data = new byte[16];
+ long attribute_id = HDF5Constants.H5I_INVALID_HID;
+
+ try {
+ rbuf0 = H5.H5Rcreate(H5fid, "/G1", HDF5Constants.H5R_OBJECT, -1);
+ rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
+ //System.arraycopy(rbuf0, 0, dset_data, 0, 8);
+ System.arraycopy(rbuf1, 0, dset_data, 8, 8);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsAttr: H5Rcreate failed");
+ }
+
+ try {
+ attribute_id = H5.H5Acreate(H5did2, "A1", HDF5Constants.H5T_STD_REF_OBJ, H5dsid, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsAttr.H5Acreate: ", attribute_id >= 0);
+ H5.H5Awrite(attribute_id, HDF5Constants.H5T_STD_REF_OBJ, dset_data);
+
+ H5.H5Aclose(attribute_id);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsAttr: H5Awrite failed");
+ }
+ finally {
+ try {H5.H5Aclose(attribute_id);} catch (Exception exx) {}
+ }
+
+ try {
+ ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+ assertTrue("testH5OcopyRefsAttr.H5Pcreate: ", ocp_plist_id >= 0);
+ H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+ H5.H5Ocopy(H5fid, ".", H5fid, "CPYREF", ocp_plist_id, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsAttr: H5Ocopy failed");
+ }
+ finally {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception ex) {}
+ }
+ }
+
+ @Test
+ public void testH5OcopyRefsDatasettodiffFile() {
+ byte rbuf1[] = null;
+ byte[] dset_data = new byte[16];
+ long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
+ long dataset_id = HDF5Constants.H5I_INVALID_HID;
+ long H5fid2 = HDF5Constants.H5I_INVALID_HID;
+
+ try {
+ rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
+ System.arraycopy(rbuf1, 0, dset_data, 8, 8);
+
+ dataset_id = H5.H5Dcreate(H5fid, "DSREF",
+ HDF5Constants.H5T_STD_REF_OBJ, H5dsid,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettodiffFile.H5Dcreate: ", dataset_id >= 0);
+ H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF_OBJ,
+ HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+ H5.H5Dclose(dataset_id);
+ }
+ catch (Exception ex) {
+ fail("testH5OcopyRefsDatasettodiffFile: create dataset failed");
+ }
+ finally {
+ try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
+ }
+
+ try {
+ //create new file
+ H5fid2 = H5.H5Fcreate("copy.h5", HDF5Constants.H5F_ACC_TRUNC,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettodiffFile.H5Fcreate: ", H5fid2 >= 0);
+ H5.H5Fflush(H5fid2, HDF5Constants.H5F_SCOPE_LOCAL);
+ }
+ catch (Exception ex) {
+ try {H5.H5Fclose(H5fid2);} catch (Exception exx) {}
+ fail("testH5OcopyRefsDatasettodiffFile: H5Fcreate failed");
+ }
+
+ try {
+ //create object copy property list id and set the flags.
+ ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+ assertTrue("testH5OcopyRefsDatasettodiffFile.H5Pcreate: ", ocp_plist_id >= 0);
+ H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+
+ //Perform copy function.
+ H5.H5Ocopy(H5fid, ".", H5fid2, "CPYREFD", ocp_plist_id, HDF5Constants.H5P_DEFAULT);
+ }
+ catch (Exception ex){
+ ex.printStackTrace();
+ fail("testH5OcopyRefsDatasettodiffFile: H5Ocopy failed");
+ }
+ finally {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception ex) {}
+ try {H5.H5Fclose(H5fid2);} catch (Exception ex) {}
+ }
+ _deleteFile("copy.h5");
+ }
+
+ @Test
+ public void testH5OcopyRefsDatasettosameFile() {
+ byte rbuf0[]=null , rbuf1[] = null;
+ byte[] dset_data = new byte[16];
+ long ocp_plist_id = HDF5Constants.H5I_INVALID_HID;
+ long dataset_id = HDF5Constants.H5I_INVALID_HID;
+ long did = HDF5Constants.H5I_INVALID_HID;
+ int obj_type = -1;
+ byte[] read_data = new byte[16];
+
+ try {
+ rbuf0 = H5.H5Rcreate(H5fid, "/G1", HDF5Constants.H5R_OBJECT, -1);
+ rbuf1 = H5.H5Rcreate(H5fid, "DS2", HDF5Constants.H5R_OBJECT, -1);
+ System.arraycopy(rbuf0, 0, dset_data, 0, 8);
+ System.arraycopy(rbuf1, 0, dset_data, 8, 8);
+
+ //Create a dataset and write object references to it.
+ dataset_id = H5.H5Dcreate(H5fid, "DSREF",
+ HDF5Constants.H5T_STD_REF_OBJ, H5dsid,
+ HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT, HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettosameFile.H5Dcreate: ", dataset_id >= 0);
+ H5.H5Dwrite(dataset_id, HDF5Constants.H5T_STD_REF_OBJ,
+ HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
+ HDF5Constants.H5P_DEFAULT, dset_data);
+ //Close the dataset.
+ H5.H5Dclose(dataset_id);
+ }
+ catch (Exception ex) {
+ try {H5.H5Dclose(dataset_id);} catch (Exception exx) {}
+ fail("testH5OcopyRefsDatasettosameFile: create dataset failed");
+ }
+
+ try {
+ ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+ assertTrue("testH5OcopyRefsDatasettosameFile.H5Pcreate: ", ocp_plist_id >= 0);
+ H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+ }
+ catch (Exception ex) {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
+ fail("testH5OcopyRefsDatasettosameFile: H5Pset_copy_object failed");
+ }
+
+ //Perform copy function.
+ try {
+ H5.H5Ocopy(H5fid, "DSREF", H5fid, "CPYREFD", ocp_plist_id, HDF5Constants.H5P_DEFAULT);
+ }
+ catch(Exception ex) {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
+ fail("testH5OcopyRefsDatasettosameFile: H5Ocopy failed");
+ }
+
+ //Open the dataset that has been copied
+ try {
+ did = H5.H5Dopen(H5fid, "DSREF", HDF5Constants.H5P_DEFAULT);
+ assertTrue("testH5OcopyRefsDatasettosameFile.H5Dopen: ", did >= 0);
+ }
+ catch (Exception e) {
+ try {H5.H5Dclose(did);} catch (Exception exx) {}
+ e.printStackTrace();
+ fail("testH5OcopyRefsDatasettosameFile: H5Dopen failed");
+ }
+
+ try {
+ //Read the dataset object references in the read_data buffer.
+ H5.H5Dread(did, HDF5Constants.H5T_STD_REF_OBJ, HDF5Constants.H5S_ALL,HDF5Constants.H5S_ALL, HDF5Constants.H5P_DEFAULT, read_data);
+ System.arraycopy(read_data, 0, rbuf0, 0, 8);
+ System.arraycopy(read_data, 8, rbuf1, 0, 8);
+
+ //Get the type of object the reference points to.
+ obj_type = H5.H5Rget_obj_type(H5fid, HDF5Constants.H5R_OBJECT, rbuf1);
+ assertEquals(obj_type, HDF5Constants.H5O_TYPE_DATASET);
+
+ obj_type = H5.H5Rget_obj_type(H5fid, HDF5Constants.H5R_OBJECT, rbuf0);
+ assertEquals(obj_type, HDF5Constants.H5O_TYPE_GROUP);
+ }
+ catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ finally {
+ try {H5.H5Dclose(did);} catch (Exception ex) {}
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception ex) {}
+ }
+ }
+
+ @Test
+ public void testH5OcopyNullRef() throws Throwable {
+ final long _pid_ = HDF5Constants.H5P_DEFAULT;
+ long sid = HDF5Constants.H5I_INVALID_HID;
+ long did = HDF5Constants.H5I_INVALID_HID;
+ long aid = HDF5Constants.H5I_INVALID_HID;
+
+ try {
+ sid = H5.H5Screate_simple(1, new long[] {1}, null);
+ assertTrue("testH5OcopyNullRef.H5Screate_simple: ", sid >= 0);
+ did = H5.H5Dcreate(H5fid, "Dataset_with_null_Ref", HDF5Constants.H5T_NATIVE_INT, sid, _pid_, _pid_, _pid_);
+ assertTrue("testH5OcopyNullRef.H5Dcreate: ", did > 0);
+ aid = H5.H5Acreate(did, "Null_Ref", HDF5Constants.H5T_STD_REF_OBJ, sid, _pid_, _pid_);
+ assertTrue("testH5OcopyNullRef.H5Acreate: ", aid > 0);
+ }
+ catch (Exception ex) {
+ ex.printStackTrace();
+ }
+ finally {
+ try {H5.H5Dclose(did);} catch (Exception exx) {}
+ try {H5.H5Aclose(aid);} catch (Exception exx) {}
+ try {H5.H5Sclose(sid);} catch (Exception exx) {}
+ }
+
+ long ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+ assertTrue("testH5OcopyNullRef.H5Pcreate: ", ocp_plist_id >= 0);
+ H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+ try {
+ H5.H5Ocopy(H5fid, "/Dataset_with_null_Ref", H5fid, "/Dataset_with_null_Ref_cp", ocp_plist_id, _pid_);
+ }
+ finally {
+ try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
+ }
+ }
+
+// @Ignore because of JIRA HDF5-9547
+// @Test(expected = HDF5LibraryException.class)
+// public void testH5OcopyInvalidRef() throws Throwable {
+// final long _pid_ = HDF5Constants.H5P_DEFAULT;
+// long sid = HDF5Constants.H5I_INVALID_HID;
+// long did = HDF5Constants.H5I_INVALID_HID;
+// long aid = HDF5Constants.H5I_INVALID_HID;
+//
+// try {
+// sid = H5.H5Screate_simple(1, new long[] {1}, null);
+// assertTrue("testH5OcopyInvalidRef.H5Screate_simple: ", sid >= 0);
+// did = H5.H5Dcreate(H5fid, "Dataset_with_invalid_Ref", HDF5Constants.H5T_NATIVE_INT, sid, _pid_, _pid_, _pid_);
+// assertTrue("testH5OcopyInvalidRef.H5Dcreate: ", did > 0);
+// aid = H5.H5Acreate(did, "Invalid_Ref", HDF5Constants.H5T_STD_REF_OBJ, sid, _pid_, _pid_);
+// assertTrue("testH5OcopyInvalidRef.H5Acreate: ", aid > 0);
+// H5.H5Awrite(aid, HDF5Constants.H5T_STD_REF_OBJ, new long[]{-1});
+// }
+// catch (Exception ex) {
+// ex.printStackTrace();
+// }
+// finally {
+// try {H5.H5Dclose(did);} catch (Exception exx) {}
+// try {H5.H5Aclose(aid);} catch (Exception exx) {}
+// try {H5.H5Sclose(sid);} catch (Exception exx) {}
+// }
+//
+// long ocp_plist_id = H5.H5Pcreate(HDF5Constants.H5P_OBJECT_COPY);
+// assertTrue("testH5OcopyInvalidRef.H5Pcreate: ", ocp_plist_id >= 0);
+// H5.H5Pset_copy_object(ocp_plist_id, HDF5Constants.H5O_COPY_EXPAND_REFERENCE_FLAG);
+// try {
+// H5.H5Ocopy(H5fid, "/Dataset_with_invalid_Ref", H5fid, "/Dataset_with_invalid_Ref_cp", ocp_plist_id, _pid_);
+// }
+// finally {
+// try {H5.H5Pclose(ocp_plist_id);} catch (Exception exx) {}
+// }
+// }
+
+}
diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in
index 39db296..ca2b3a9 100644
--- a/java/test/junit.sh.in
+++ b/java/test/junit.sh.in
@@ -66,8 +66,8 @@ $top_builddir/java/src/jni/.libs/libhdf5_java.*
LIST_JAR_TESTFILES="
$HDFLIB_HOME/hamcrest-core.jar
$HDFLIB_HOME/junit.jar
-$HDFLIB_HOME/slf4j-api-1.7.25.jar
-$HDFLIB_HOME/ext/slf4j-simple-1.7.25.jar
+$HDFLIB_HOME/slf4j-api-1.7.33.jar
+$HDFLIB_HOME/ext/slf4j-simple-1.7.33.jar
"
LIST_JAR_FILES="
$top_builddir/java/src/$JARFILE
@@ -108,6 +108,7 @@ $HDFTEST_HOME/testfiles/JUnit-TestH5Arw.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Oparams.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Obasic.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Ocreate.txt
+$HDFTEST_HOME/testfiles/JUnit-TestH5OcopyOld.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5Ocopy.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5PL.txt
$HDFTEST_HOME/testfiles/JUnit-TestH5VL.txt
@@ -298,7 +299,7 @@ JAVAEXEFLAGS=@H5_JAVAFLAGS@
COPY_LIBFILES_TO_BLDLIBDIR
COPY_DATAFILES_TO_BLDDIR
-CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-1.7.25.jar:"$BLDLIBDIR"/slf4j-simple-1.7.25.jar:"$TESTJARFILE""
+CPATH=".:"$BLDLIBDIR"/"$JARFILE":"$BLDLIBDIR"/junit.jar:"$BLDLIBDIR"/hamcrest-core.jar:"$BLDLIBDIR"/slf4j-api-1.7.33.jar:"$BLDLIBDIR"/slf4j-simple-1.7.33.jar:"$TESTJARFILE""
TEST=/usr/bin/test
if [ ! -x /usr/bin/test ]
@@ -1030,6 +1031,27 @@ else
test yes = "$verbose" && $DIFF JUnit-TestH5Ocreate.txt JUnit-TestH5Ocreate.out |sed 's/^/ /'
fi
+echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5OcopyOld"
+TESTING JUnit-TestH5OcopyOld
+($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5OcopyOld > JUnit-TestH5OcopyOld.ext)
+
+# Extract file name, line number, version and thread IDs because they may be different
+sed -e 's/thread [0-9]*/thread (IDs)/' -e 's/: .*\.c /: (file name) /' \
+ -e 's/line [0-9]*/line (number)/' \
+ -e 's/Time: [0-9]*[\.,[0-9]*]*/Time: XXXX/' \
+ -e 's/v[1-9]*\.[0-9]*\./version (number)\./' \
+ -e 's/[1-9]*\.[0-9]*\.[0-9]*[^)]*/version (number)/' \
+ JUnit-TestH5OcopyOld.ext > JUnit-TestH5OcopyOld.out
+
+if diff JUnit-TestH5OcopyOld.out JUnit-TestH5OcopyOld.txt > /dev/null; then
+ echo " PASSED JUnit-TestH5OcopyOld"
+else
+ echo "**FAILED** JUnit-TestH5OcopyOld"
+ echo " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF JUnit-TestH5OcopyOld.txt JUnit-TestH5OcopyOld.out |sed 's/^/ /'
+fi
+
echo "$JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocopy"
TESTING JUnit-TestH5Ocopy
($RUNSERIAL $JAVAEXE $JAVAEXEFLAGS -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=trace -Djava.library.path=$BLDLIBDIR -cp $CLASSPATH -ea org.junit.runner.JUnitCore test.TestH5Ocopy > JUnit-TestH5Ocopy.ext)
diff --git a/java/test/testfiles/JUnit-TestH5Ocopy.txt b/java/test/testfiles/JUnit-TestH5Ocopy.txt
index 32dfde7..7f66410 100644
--- a/java/test/testfiles/JUnit-TestH5Ocopy.txt
+++ b/java/test/testfiles/JUnit-TestH5Ocopy.txt
@@ -1,9 +1,10 @@
JUnit version 4.11
.testH5OcopyRefsDatasettosameFile
+.testH5OcopyNullRef
.testH5OcopyRefsDatasettodiffFile
.testH5OcopyRefsAttr
Time: XXXX
-OK (3 tests)
+OK (4 tests)
diff --git a/java/test/testfiles/JUnit-TestH5OcopyOld.txt b/java/test/testfiles/JUnit-TestH5OcopyOld.txt
new file mode 100644
index 0000000..7f66410
--- /dev/null
+++ b/java/test/testfiles/JUnit-TestH5OcopyOld.txt
@@ -0,0 +1,10 @@
+JUnit version 4.11
+.testH5OcopyRefsDatasettosameFile
+.testH5OcopyNullRef
+.testH5OcopyRefsDatasettodiffFile
+.testH5OcopyRefsAttr
+
+Time: XXXX
+
+OK (4 tests)
+
diff --git a/release_docs/HISTORY-1_0-1_8_0_rc3.txt b/release_docs/HISTORY-1_0-1_8_0_rc3.txt
index f54ba64..3669f4d 100644
--- a/release_docs/HISTORY-1_0-1_8_0_rc3.txt
+++ b/release_docs/HISTORY-1_0-1_8_0_rc3.txt
@@ -1245,7 +1245,7 @@ Known Problems
causes failures in several HDF5 library tests.
* For HPUX 11.23 many tools tests failed for 64-bit version when linked to the
shared libraries (tested for 1.8.0-beta2)
-* For SNL, Red Storm: only paralle HDF5 is supported. The serial tests pass
+* For SNL, Red Storm: only parallel HDF5 is supported. The serial tests pass
and the parallel tests also pass with lots of non-fatal error messages.
* For LLNL, uP: both serial and parallel pass. Zeus: serial passes but
parallel fails with a known proglem in MPI. ubgl: serial passes but
diff --git a/release_docs/HISTORY-1_13.txt b/release_docs/HISTORY-1_13.txt
new file mode 100644
index 0000000..208b572
--- /dev/null
+++ b/release_docs/HISTORY-1_13.txt
@@ -0,0 +1,2168 @@
+HDF5 History
+============
+
+This file contains development history of the HDF5 1.13 releases from
+the develop branch
+
+01. Release Information for hdf5-1.13.0
+02. Release Information for hdf5-1.13.1
+
+[Search on the string '%%%%' for section breaks of each release.]
+
+%%%%1.13.0%%%%
+
+HDF5 version 1.13.0 released on 2021-12-01
+================================================================================
+
+
+INTRODUCTION
+============
+
+This document describes the differences between this release and the previous
+HDF5 release. It contains information on the platforms tested and known
+problems in this release. For more details check the HISTORY*.txt files in the
+HDF5 source.
+
+Note that documentation in the links below will be updated at the time of each
+final release.
+
+Links to HDF5 documentation can be found on The HDF5 web page:
+
+ https://portal.hdfgroup.org/display/HDF5/HDF5
+
+The official HDF5 releases can be obtained from:
+
+ https://www.hdfgroup.org/downloads/hdf5/
+
+Changes from Release to Release and New Features in the HDF5-1.13.x release series
+can be found at:
+
+ https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- New platforms, languages and compilers tested
+- Bug Fixes since HDF5-1.12.0
+- Platforms Tested
+- Known Problems
+- CMake vs. Autotools installations
+
+
+New Features
+============
+
+ Configuration:
+ -------------
+ - Added new configure option to support building parallel tools.
+ See Tools below (autotools - CMake):
+ --enable-parallel-tools HDF5_BUILD_PARALLEL_TOOLS
+
+ (RAW - 2021/10/25)
+
+ - Added new configure options to enable dimension scales APIs (H5DS*) to
+ use new object references with the native VOL connector (aka native HDF5
+ library). New references are always used for non-native terminal VOL
+ connectors (e.g., DAOS).
+
+ Autotools --enable-dimension-scales-with-new-ref
+ CMake HDF5_DIMENSION_SCALES_NEW_REF=ON
+
+ (EIP - 2021/10/25, HDFFV-11180)
+
+ - Refactored the utils folder.
+
+ Added subfolder test and moved the 'swmr_check_compat_vfd.c file'
+ from test into utils/test. Deleted the duplicate swmr_check_compat_vfd.c
+ file in hl/tools/h5watch folder. Also fixed vfd check options.
+
+ (ADB - 2021/10/18)
+
+ - Changed autotools and CMake configurations to derive both
+ compilation warnings-as-errors and warnings-only-warn configurations
+ from the same files, 'config/*/*error*'. Removed redundant files
+ 'config/*/*noerror*'.
+
+ (DCY - 2021/09/29)
+
+ - Added new option to control the build of High-Level tools
+ that default ON/enabled.
+
+ Add configure options (autotools - CMake):
+ --enable-hltools HDF5_BUILD_HL_TOOLS
+
+ Disabling this option prevents building the gif tool which
+ contains the following CVEs:
+ HDFFV-10592 CVE-2018-17433
+ HDFFV-10593 CVE-2018-17436
+ HDFFV-11048 CVE-2020-10809
+
+ (ADB - 2021/09/16, HDFFV-11266)
+
+ - Adds C++ Autotools configuration file for Intel
+
+ * Checks for icpc as the compiler
+ * Sets std=c++11
+ * Copies most non-warning flags from intel-flags
+
+ (DER - 2021/06/02)
+
+ - Adds C++ Autotools configuration file for PGI
+
+ * Checks for pgc++ as the compiler name (was: pgCC)
+ * Sets -std=c++11
+ * Other options basically match new C options (below)
+
+ (DER - 2021/06/02)
+
+ - Updates PGI C options
+
+ * -Minform set to warn (was: inform) to suppress spurious messages
+ * Sets -gopt -O2 as debug options
+ * Sets -O4 as 'high optimization' option
+ * Sets -O0 as 'no optimization' option
+ * Removes specific settings for PGI 9 and 10
+
+ (DER - 2021/06/02)
+
+ - A C++11-compliant compiler is now required to build the C++ wrappers
+
+ CMAKE_CXX_STANDARD is now set to 11 when building with CMake and
+ -std=c++11 is added when building with clang/gcc via the Autotools.
+
+ (DER - 2021/05/27)
+
+ - CMake will now run the shell script tests in test/ by default
+
+ The test directory includes several shell script tests that previously
+ were not run by CMake. These are now run by default. TEST_SHELL_SCRIPTS
+ has been set to ON and SH_PROGRAM has been set to bash (some test
+ scripts use bash-isms). Platforms without bash (e.g., Windows) will
+ ignore the script tests.
+
+ (DER - 2021/05/23)
+
+ - Removed unused HDF5_ENABLE_HSIZET option from CMake
+
+ This has been unused for some time and has no effect.
+
+ (DER - 2021/05/23)
+
+ - CMake no longer builds the C++ library by default
+
+ HDF5_BUILD_CPP_LIB now defaults to OFF, which is in line with the
+ Autotools build defaults.
+
+ (DER - 2021/04/20)
+
+ - Removal of pre-VS2015 work-arounds
+
+ HDF5 now requires Visual Studio 2015 or greater, so old work-around
+ code and definitions have been removed, including:
+
+ * <inttypes.h>
+ * snprintf and vsnprintf
+ * llround, llroundf, lround, lroundf, round, roundf
+ * strtoll and strtoull
+ * va_copy
+ * struct timespec
+
+ (DER - 2021/03/22)
+
+ - Add CMake variable HDF5_LIB_INFIX
+
+ This infix is added to all library names after 'hdf5'.
+ e.g. the infix '_openmpi' results in the library name 'libhdf5_openmpi.so'
+ This name is used in packages on debian based systems.
+ (see https://packages.debian.org/jessie/amd64/libhdf5-openmpi-8/filelist)
+
+ (barcode - 2021/03/22)
+
+ - On macOS, Universal Binaries can now be built, allowing native execution on
+ both Intel and Apple Silicon (ARM) based Macs.
+
+ To do so, set CMAKE_OSX_ARCHITECTURES="x86_64;arm64"
+
+ (SAM - 2021/02/07, github-311)
+
+ - Added a configure-time option to control certain compiler warnings
+ diagnostics
+
+ A new configure-time option was added that allows some compiler warnings
+ diagnostics to have the default operation. This is mainly intended for
+ library developers and currently only works for gcc 10 and above. The
+ diagnostics flags apply to C, C++ and Fortran compilers and will appear
+ in "H5 C Flags", "H5 C++ Flags" and "H5 Fortran Flags", respectively. They
+ will NOT be exported to h5cc, etc.
+
+ The default is OFF, which will disable the warnings URL and color attributes
+ for the warnings output. ON will not add the flags and allow default behavior.
+
+ Autotools: --enable-diags
+
+ CMake: HDF5_ENABLE_BUILD_DIAGS
+
+ (ADB - 2021/02/05, HDFFV-11213)
+
+ - CMake option to build the HDF filter plugins project as an external project
+
+ The HDF filter plugins project is a collection of registered compression
+ filters that can be dynamically loaded when needed to access data stored
+ in a hdf5 file. This CMake-only option allows the plugins to be built and
+ distributed with the hdf5 library and tools. Like the options for szip and
+ zlib, either a tgz file or a git repository can be specified for the source.
+
+ The option was refactored to use the CMake FetchContent process. This allows
+ more control over the filter targets, but required external project command
+ options to be moved to a CMake include file, HDF5PluginCache.cmake. Also
+ enabled the filter examples to be used as tests for operation of the
+ filter plugins.
+
+ (ADB - 2020/12/10, OESS-98)
+
+ - FreeBSD Autotools configuration now defaults to 'cc' and 'c++' compilers
+
+ On FreeBSD, the autotools defaulted to 'gcc' as the C compiler and did
+ not process C++ options. Since FreeBSD 10, the default compiler has
+ been clang (via 'cc').
+
+ The default compilers have been set to 'cc' for C and 'c++' for C++,
+ which will pick up clang and clang++ respectively on FreeBSD 10+.
+ Additionally, clang options are now set correctly for both C and C++
+ and g++ options will now be set if that compiler is being used (an
+ omission from the former functionality).
+
+ (DER - 2020/11/28, HDFFV-11193)
+
+ - Fixed POSIX problems when building w/ gcc on Solaris
+
+ When building on Solaris using gcc, the POSIX symbols were not
+ being set correctly, which could lead to issues like clock_gettime()
+ not being found.
+
+ The standard is now set to gnu99 when building with gcc on Solaris,
+ which allows POSIX things to be #defined and linked correctly. This
+ differs slightly from the gcc norm, where we set the standard to c99
+ and manually set POSIX #define symbols.
+
+ (DER - 2020/11/25, HDFFV-11191)
+
+ - Added a configure-time option to consider certain compiler warnings
+ as errors
+
+ A new configure-time option was added that converts some compiler warnings
+ to errors. This is mainly intended for library developers and currently
+ only works for gcc and clang. The warnings that are considered errors
+ will appear in the generated libhdf5.settings file. These warnings apply
+ to C and C++ code and will appear in "H5 C Flags" and "H5 C++ Flags",
+ respectively. They will NOT be exported to h5cc, etc.
+
+ The default is OFF. Building with this option may fail when compiling
+ on operating systems and with compiler versions not commonly used by
+ the library developers. Compilation may also fail when headers not
+ under the control of the library developers (e.g., mpi.h, hdfs.h) raise
+ warnings.
+
+ Autotools: --enable-warnings-as-errors
+
+ CMake: HDF5_ENABLE_WARNINGS_AS_ERRORS
+
+ (DER - 2020/11/23, HDFFV-11189)
+
+ - Autotools and CMake target added to produce doxygen generated documentation
+
+ The default is OFF or disabled.
+ Autoconf option is '--enable-doxygen'
+ autotools make target is 'doxygen' and will build all doxygen targets
+ CMake configure option is 'HDF5_BUILD_DOC'.
+ CMake target is 'doxygen' for all available doxygen targets
+ CMake target is 'hdf5lib_doc' for the src subdirectory
+
+ (ADB - 2020/11/03)
+
+ - CMake option to use MSVC naming conventions with MinGW
+
+ HDF5_MSVC_NAMING_CONVENTION option enable to use MSVC naming conventions
+ when using a MinGW toolchain
+
+ (xan - 2020/10/30)
+
+ - CMake option to statically link gcc libs with MinGW
+
+ HDF5_MINGW_STATIC_GCC_LIBS allows to statically link libg/libstdc++
+ with the MinGW toolchain
+
+ (xan - 2020/10/30)
+
+ - CMake option to build the HDF filter plugins project as an external project
+
+ The HDF filter plugins project is a collection of registered compression
+ filters that can be dynamically loaded when needed to access data stored
+ in a hdf5 file. This CMake-only option allows the plugins to be built and
+ distributed with the hdf5 library and tools. Like the options for szip and
+ zlib, either a tgz file or a git repository can be specified for the source.
+
+ The necessary options are (see the INSTALL_CMake.txt file):
+ HDF5_ENABLE_PLUGIN_SUPPORT
+ PLUGIN_TGZ_NAME or PLUGIN_GIT_URL
+ There are more options necessary for various filters and the plugin project
+ documents should be referenced.
+
+ (ADB - 2020/09/27, OESS-98)
+
+ - Added CMake option to format source files
+
+ HDF5_ENABLE_FORMATTERS option will enable creation of targets using the
+ pattern - HDF5_*_SRC_FORMAT - where * corresponds to the source folder
+ or tool folder. All sources can be formatted by executing the format target;
+ make format
+
+ (ADB - 2020/08/24)
+
+ - Add file locking configure and CMake options
+
+ HDF5 1.10.0 introduced a file locking scheme, primarily to help
+ enforce SWMR setup. Formerly, the only user-level control of the scheme
+ was via the HDF5_USE_FILE_LOCKING environment variable.
+
+ This change introduces configure-time options that control whether
+ or not file locking will be used and whether or not the library
+ ignores errors when locking has been disabled on the file system
+ (useful on some HPC Lustre installations).
+
+ In both the Autotools and CMake, the settings have the effect of changing
+ the default property list settings (see the H5Pset/get_file_locking()
+ entry, below).
+
+ The yes/no/best-effort file locking configure setting has also been
+ added to the libhdf5.settings file.
+
+ Autotools:
+
+ An --enable-file-locking=(yes|no|best-effort) option has been added.
+
+ yes: Use file locking.
+ no: Do not use file locking.
+ best-effort: Use file locking and ignore "disabled" errors.
+
+ CMake:
+
+ Two self-explanatory options have been added:
+
+ HDF5_USE_FILE_LOCKING
+ HDF5_IGNORE_DISABLED_FILE_LOCKS
+
+ Setting both of these to ON is the equivalent to the Autotools'
+ best-effort setting.
+
+ NOTE:
+ The precedence order of the various file locking control mechanisms is:
+
+ 1) HDF5_USE_FILE_LOCKING environment variable (highest)
+
+ 2) H5Pset_file_locking()
+
+ 3) configure/CMake options (which set the property list defaults)
+
+ 4) library defaults (currently best-effort)
+
+ (DER - 2020/07/30, HDFFV-11092)
+
+ - CMake option to link the generated Fortran MOD files into the include
+ directory.
+
+ The Fortran generation of MOD files by a Fortran compile can produce
+ different binary files between SHARED and STATIC compiles with different
+ compilers and/or different platforms. Note that it has been found that
+ different versions of Fortran compilers will produce incompatible MOD
+ files. Currently, CMake will locate these MOD files in subfolders of
+ the include directory and add that path to the Fortran library target
+ in the CMake config file, which can be used by the CMake find library
+ process. For other build systems using the binary from a CMake install,
+ a new CMake configuration can be used to copy the pre-chosen version
+ of the Fortran MOD files into the install include directory.
+
+ The default will depend on the configuration of
+ BUILD_STATIC_LIBS and BUILD_SHARED_LIBS:
+ YES YES Default to SHARED
+ YES NO Default to STATIC
+ NO YES Default to SHARED
+ NO NO Default to SHARED
+ The defaults can be overridden by setting the config option
+ HDF5_INSTALL_MOD_FORTRAN to one of NO, SHARED, or STATIC
+
+ (ADB - 2020/07/09, HDFFV-11116)
+
+ - CMake option to use AEC (open source SZip) library instead of SZip
+
+ The open source AEC library is a replacement library for SZip. In
+ order to use it for hdf5 the libaec CMake source was changed to add
+ "-fPIC" and exclude test files. Autotools does not build the
+ compression libraries within hdf5 builds. New option USE_LIBAEC is
+ required to compensate for the different files produced by AEC build.
+
+ (ADB - 2020/04/22, OESS-65)
+
+ - CMake ConfigureChecks.cmake file now uses CHECK_STRUCT_HAS_MEMBER
+
+ Some handcrafted tests in HDFTests.c has been removed and the CMake
+ CHECK_STRUCT_HAS_MEMBER module has been used.
+
+ (ADB - 2020/03/24, TRILAB-24)
+
+ - Both build systems use same set of warnings flags
+
+ GNU C, C++ and gfortran warnings flags were moved to files in a config
+ sub-folder named gnu-warnings. Flags that only are available for a specific
+ version of the compiler are in files named with that version.
+ Clang C warnings flags were moved to files in a config sub-folder
+ named clang-warnings.
+ Intel C, Fortran warnings flags were moved to files in a config sub-folder
+ named intel-warnings.
+
+ There are flags in named "error-xxx" files with warnings that may
+ be promoted to errors. Some source files may still need fixes.
+
+ There are also pairs of files named "developer-xxx" and "no-developer-xxx"
+ that are chosen by the CMake option:HDF5_ENABLE_DEV_WARNINGS or the
+ configure option:--enable-developer-warnings.
+
+ In addition, CMake no longer applies these warnings for examples.
+
+ (ADB - 2020/03/24, TRILAB-192)
+
+ - Added test script for file size compare
+
+ If CMake minimum version is at least 3.14, the fileCompareTest.cmake
+ script will compare file sizes.
+
+ (ADB - 2020/02/24, HDFFV-11036)
+
+ - Update CMake minimum version to 3.12
+
+ Updated CMake minimum version to 3.12 and added version checks
+ for Windows features.
+
+ (ADB - 2020/02/05, TRILABS-142)
+
+ - Fixed CMake include properties for Fortran libraries
+
+ Corrected the library properties for Fortran to use the
+ correct path for the Fortran module files.
+
+ (ADB - 2020/02/04, HDFFV-11012)
+
+ - Added common warnings files for gnu and intel
+
+ Added warnings files to use one common set of flags
+ during configure for both autotools and CMake build
+ systems. The initial implementation only affects a
+ general set of flags for gnu and intel compilers.
+
+ (ADB - 2020/01/17)
+
+ - Added new options to CMake for control of testing
+
+ Added CMake options (default ON);
+ HDF5_TEST_SERIAL AND/OR HDF5_TEST_PARALLEL
+ combined with:
+ HDF5_TEST_TOOLS
+ HDF5_TEST_EXAMPLES
+ HDF5_TEST_SWMR
+ HDF5_TEST_FORTRAN
+ HDF5_TEST_CPP
+ HDF5_TEST_JAVA
+
+ (ADB - 2020/01/15, HDFFV-11001)
+
+ - Added Clang sanitizers to CMake for analyzer support if compiler is clang.
+
+ Added CMake code and files to execute the Clang sanitizers if
+ HDF5_ENABLE_SANITIZERS is enabled and the USE_SANITIZER option
+ is set to one of the following:
+ Address
+ Memory
+ MemoryWithOrigins
+ Undefined
+ Thread
+ Leak
+ 'Address;Undefined'
+
+ (ADB - 2019/12/12, TRILAB-135)
+
+ - Update CMake for VS2019 support
+
+ CMake added support for VS2019 in version 3.15. Changes to the CMake
+ generator setting required changes to scripts. Also updated version
+ references in CMake files as necessary.
+
+ (ADB - 2019/11/18, HDFFV-10962)
+
+ - Update CMake options to match new autotools options
+
+ Add configure options (autotools - CMake):
+ enable-asserts HDF5_ENABLE_ASSERTS
+ enable-symbols HDF5_ENABLE_SYMBOLS
+ enable-profiling HDF5_ENABLE_PROFILING
+ enable-optimization HDF5_ENABLE_OPTIMIZATION
+ In addition NDEBUG is no longer forced defined and relies on the CMake
+ process.
+
+ (ADB - 2019/10/07, HDFFV-100901, HDFFV-10637, TRILAB-97)
+
+
+ Library:
+ --------
+ - Adds new file driver-level memory copy operation for
+ "ctl" callback and updates compact dataset I/O routines
+ to utilize it
+
+ When accessing an HDF5 file with a file driver that uses
+ memory allocated in special ways (e.g., without standard
+ library's `malloc`), a crash could be observed when HDF5
+ tries to perform `memcpy` operations on such a memory
+ region.
+
+ These changes add a new H5FD_FEAT_MEMMANAGE VFD feature
+ flag, which, if specified as supported by a VFD, will
+ inform HDF5 that the VFD either uses special memory
+ management routines or wishes to perform memory management
+ in a specific way. Therefore, this flag instructs HDF5 to
+ ask the file driver to perform memory management for
+ certain operations.
+
+ These changes also introduce a new "ctl" callback
+ operation identified by the H5FD_CTL__MEM_COPY op code.
+ This operation simply asks a VFD to perform a memory copy.
+ The arguments to this operation are passed to the "ctl"
+ callback's "input" parameter as a pointer to a struct
+ defined as:
+
+ struct H5FD_ctl_memcpy_args_t {
+ void * dstbuf; /**< Destination buffer */
+ hsize_t dst_off; /**< Offset within destination buffer */
+ const void *srcbuf; /**< Source buffer */
+ hsize_t src_off; /**< Offset within source buffer */
+ size_t len; /**< Length of data to copy from source buffer */
+ } H5FD_ctl_memcpy_args_t;
+
+ Further, HDF5's compact dataset I/O routines were
+ identified as a problematic area that could cause a crash
+ for VFDs that make use of special memory management. Those
+ I/O routines were therefore updated to make use of this new
+ "ctl" callback operation in order to ask the underlying
+ file driver to correctly handle memory copies.
+
+ (JTH - 2021/09/28)
+
+ - Adds new "ctl" callback to VFD H5FD_class_t structure
+ with the following prototype:
+
+ herr_t (*ctl)(H5FD_t *file, uint64_t op_code,
+ uint64_t flags, const void *input,
+ void **output);
+
+ This newly-added "ctl" callback allows Virtual File
+ Drivers to intercept and handle arbitrary operations
+ identified by an operation code. Its parameters are
+ as follows:
+
+ `file` [in] - A pointer to the file to be operated on
+ `op_code` [in] - The operation code identifying the
+ operation to be performed
+ `flags` [in] - Flags governing the behavior of the
+ operation performed (see H5FDpublic.h
+ for a list of valid flags)
+ `input` [in] - A pointer to arguments passed to the
+ VFD performing the operation
+ `output` [out] - A pointer for the receiving VFD to
+ use for output from the operation
+
+ (JRM - 2021/08/16)
+
+ - Change how the release part of version, in major.minor.release is checked
+ for compatibility
+
+ The HDF5 library uses a function, H5check_version, to check that
+ the version defined in the header files, which is used to compile an
+ application is compatible with the version codified in the library, which
+ the application loads at runtime. This previously required an exact match
+ or the library would print a warning, dump the build settings and then
+ abort or continue. An environment variable controlled the logic.
+
+ Now the function first checks that the library release version, in
+ major.minor.release, is not older than the version in the headers.
+ Secondly, if the release version is different, it checks if either
+ the library version or the header version is in the exception list, in
+ which case the release part of version, in major.minor.release, must
+ be exact. An environment variable still controls the logic.
+
+ (ADB - 2021/07/27)
+
+ - gcc warning suppression macros were moved out of H5public.h
+
+ The HDF5 library uses a set of macros to suppress warnings on gcc.
+ These warnings were originally located in H5public.h so that the
+ multi VFD (which only uses public headers) could also make use of them
+ but internal macros should not be publicly exposed like this.
+
+ These macros have now been moved to H5private.h. Pending future multi
+ VFD refactoring, the macros have been duplicated in H5FDmulti.c to
+ suppress the format string warnings there.
+
+ (DER - 2021/06/03)
+
+ - H5Gcreate1() now rejects size_hint parameters larger than UINT32_MAX
+
+ The size_hint value is ultimately stored in a uint32_t struct field,
+ so specifying a value larger than this on a 64-bit machine can cause
+ undefined behavior including crashing the system.
+
+ The documentation for this API call was also incorrect, stating that
+ passing a negative value would cause the library to use a default
+ value. Instead, passing a "negative" value actually passes a very large
+ value, which is probably not what the user intends and can cause
+ crashes on 64-bit systems.
+
+ The Doxygen documentation has been updated and passing values larger
+ than UINT32_MAX for size_hint will now produce a normal HDF5 error.
+
+ (DER - 2021/04/29, HDFFV-11241)
+
+
+ - H5Pset_fapl_log() no longer crashes when passed an invalid fapl ID
+
+ When passed an invalid fapl ID, H5Pset_fapl_log() would usually
+ segfault when attempting to free an uninitialized pointer in the error
+ handling code. This behavior is more common in release builds or
+ when the memory sanitization checks were not selected as a build
+ option.
+
+ The pointer is now correctly initialized and the API call now
+ produces a normal HDF5 error when fed an invalid fapl ID.
+
+ (DER - 2021/04/28, HDFFV-11240)
+
+ - Fixes a segfault when H5Pset_mdc_log_options() is called multiple times
+
+ The call incorrectly attempts to free an internal copy of the previous
+ log location string, which causes a segfault. This only happens
+ when the call is invoked multiple times on the same property list.
+ On the first call to a given fapl, the log location is set to NULL so
+ the segfault does not occur.
+
+ The string is now handled properly and the segfault no longer occurs.
+
+ (DER - 2021/04/27, HDFFV-11239)
+
+ - HSYS_GOTO_ERROR now emits the results of GetLastError() on Windows
+
+ HSYS_GOTO_ERROR is an internal macro that is used to produce error
+      messages when system calls fail. These strings include errno and
+      the associated strerror() value, which are not particularly useful
+ when a Win32 API call fails.
+
+ On Windows, this macro has been updated to include the result of
+ GetLastError(). When a system call fails on Windows, usually only
+ one of errno and GetLastError() will be useful, however we emit both
+ for the user to parse. The Windows error message is not emitted as
+ it would be awkward to free the FormatMessage() buffer given the
+ existing HDF5 error framework. Users will have to look up the error
+ codes in MSDN.
+
+ The format string on Windows has been changed from:
+
+ "%s, errno = %d, error message = '%s'"
+
+ to:
+
+ "%s, errno = %d, error message = '%s', Win32 GetLastError() = %"PRIu32""
+
+ for those inclined to parse it for error values.
+
+ (DER - 2021/03/21)
+
+ - File locking now works on Windows
+
+ Since version 1.10.0, the HDF5 library has used a file locking scheme
+ to help enforce one reader at a time accessing an HDF5 file, which can
+ be helpful when setting up readers and writers to use the single-
+ writer/multiple-readers (SWMR) access pattern.
+
+ In the past, this was only functional on POSIX systems where flock() or
+ fcntl() were present. Windows used a no-op stub that always succeeded.
+
+ HDF5 now uses LockFileEx() and UnlockFileEx() to lock the file using the
+ same scheme as POSIX systems. We lock the entire file when we set up the
+ locks (by passing DWORDMAX as both size parameters to LockFileEx()).
+
+ (DER - 2021/03/19, HDFFV-10191)
+
+ - H5Epush_ret() now requires a trailing semicolon
+
+ H5Epush_ret() is a function-like macro that has been changed to
+ contain a `do {} while(0)` loop. Consequently, a trailing semicolon
+      is now required to end the `while` statement. Previously, a trailing
+      semicolon would work, but was not mandatory. This change was made to
+      allow clang-format to correctly format the source code.
+
+ (SAM - 2021/03/03)
+
+ - Improved performance of H5Sget_select_elem_pointlist
+
+ Modified library to cache the point after the last block of points
+ retrieved by H5Sget_select_elem_pointlist, so a subsequent call to the
+ same function to retrieve the next block of points from the list can
+ proceed immediately without needing to iterate over the point list.
+
+ (NAF - 2021/01/19)
+
+ - Replaced H5E_ATOM with H5E_ID in H5Epubgen.h
+
+ The term "atom" is archaic and not in line with current HDF5 library
+ terminology, which uses "ID" instead. "Atom" has mostly been purged
+ from the library internals and this change removes H5E_ATOM from
+ the H5Epubgen.h (exposed via H5Epublic.h) and replaces it with
+ H5E_ID.
+
+ (DER - 2020/11/24, HDFFV-11190)
+
+ - Add a new public function H5Ssel_iter_reset
+
+ This function resets a dataspace selection iterator back to an
+ initial state so that it may be used for iteration once more.
+ This can be useful when needing to iterate over a selection
+ multiple times without having to repeatedly create/destroy
+ a selection iterator for that dataspace selection.
+
+ (JTH - 2020/09/18)
+
+ - Remove HDFS VFD stubs
+
+ The original implementation of the HDFS VFD included non-functional
+ versions of the following public API calls when the HDFS VFD is
+ not built as a part of the HDF5 library:
+
+ * H5FD_hdfs_init()
+ * H5Pget_fapl_hdfs()
+ * H5Pset_fapl_hdfs()
+
+ They will remain present in HDF5 1.10 and HDF5 1.12 releases
+ for binary compatibility purposes but have been removed as of 1.14.0.
+
+ Note that this has nothing to do with the real HDFS VFD API calls
+ that are fully functional when the HDFS VFD is configured and built.
+
+ We simply changed:
+
+ #ifdef LIBHDFS
+ <real API call>
+ #else
+ <useless stub>
+ #endif
+
+ to:
+
+ #ifdef LIBHDFS
+ <real API call>
+ #endif
+
+ Which is how the other optional VFDs are handled.
+
+ (DER - 2020/08/27)
+
+ - Add Mirror VFD
+
+ Use TCP/IP sockets to perform write-only (W/O) file I/O on a remote
+ machine. Must be used in conjunction with the Splitter VFD.
+
+ (JOS - 2020/03/13, TBD)
+
+ - Add Splitter VFD
+
+ Maintain separate R/W and W/O channels for "concurrent" file writes
+ to two files using a single HDF5 file handle.
+
+ (JOS - 2020/03/13, TBD)
+
+ - Refactored public exposure of haddr_t type in favor of "object tokens"
+
+ To better accommodate HDF5 VOL connectors where "object addresses in a file"
+ may not make much sense, the following changes were made to the library:
+
+ * Introduced new H5O_token_t "object token" type, which represents a
+ unique and permanent identifier for referencing an HDF5 object within
+ a container; these "object tokens" are meant to replace object addresses.
+ Along with the new type, a new H5Oopen_by_token API call was introduced
+ to open an object by a token, similar to how object addresses were
+ previously used with H5Oopen_by_addr.
+
+ * Introduced new H5Lget_info2, H5Lget_info_by_idx2, H5Literate2, H5Literate_by_name2,
+ H5Lvisit2 and H5Lvisit_by_name2 API calls, along with their associated H5L_info2_t
+ struct and H5L_iterate2_t callback function, which work with the newly-introduced
+ object tokens, instead of object addresses. The original functions have been
+ renamed to version 1 functions and are deprecated in favor of the new version 2
+ functions. The H5L_info_t and H5L_iterate_t types have been renamed to version 1
+ types and are now deprecated in favor of their version 2 counterparts. For each of
+ the functions and types, compatibility macros take place of the original symbols.
+
+ * Introduced new H5Oget_info3, H5Oget_info_by_name3, H5Oget_info_by_idx3,
+ H5Ovisit3 and H5Ovisit_by_name3 API calls, along with their associated H5O_info2_t
+ struct and H5O_iterate2_t callback function, which work with the newly-introduced
+ object tokens, instead of object addresses. The version 2 functions are now
+ deprecated in favor of the version 3 functions. The H5O_info_t and H5O_iterate_t
+ types have been renamed to version 1 types and are now deprecated in favor of their
+ version 2 counterparts. For each, compatibility macros take place of the original
+ symbols.
+
+ * Introduced new H5Oget_native_info, H5Oget_native_info_by_name and
+ H5Oget_native_info_by_idx API calls, along with their associated H5O_native_info_t
+ struct, which are used to retrieve the native HDF5 file format-specific information
+ about an object. This information (such as object header info and B-tree/heap info)
+ has been removed from the new H5O_info2_t struct so that the more generic
+ H5Oget_info(_by_name/_by_idx)3 routines will not try to retrieve it for non-native
+ VOL connectors.
+
+ * Added new H5Otoken_cmp, H5Otoken_to_str and H5Otoken_from_str routines to compare
+ two object tokens, convert an object token into a nicely-readable string format and
+ to convert an object token string back into a real object token, respectively.
+
+ (DER, QAK, JTH - 2020/01/16)
+
+ - Add new public function H5Sselect_adjust.
+
+ This function shifts a dataspace selection by a specified logical offset
+ within the dataspace extent. This can be useful for VOL developers to
+ implement chunked datasets.
+
+ (NAF - 2019/11/18)
+
+ - Add new public function H5Sselect_project_intersection.
+
+ This function computes the intersection between two dataspace selections
+ and projects that intersection into a third selection. This can be useful
+ for VOL developers to implement chunked or virtual datasets.
+
+ (NAF - 2019/11/13, ID-148)
+
+ - Add new public function H5VLget_file_type.
+
+ This function returns a datatype equivalent to the supplied datatype but
+ with the location set to be in the file. This datatype can then be used
+ with H5Tconvert to convert data between file and in-memory representation.
+ This function is intended for use only by VOL connector developers.
+
+ (NAF - 2019/11/08, ID-127)
+
+
+ Parallel Library:
+ -----------------
+ -
+
+
+ Fortran Library:
+ ----------------
+    - H5Fget_name_f was fixed to correctly handle trailing whitespace and
+      newly allocated buffers.
+
+ (MSB - 2021/08/30, github-826,972)
+
+ - Add wrappers for H5Pset/get_file_locking() API calls
+
+ h5pget_file_locking_f()
+ h5pset_file_locking_f()
+
+ See the configure option discussion for HDFFV-11092 (above) for more
+ information on the file locking feature and how it's controlled.
+
+ (DER - 2020/07/30, HDFFV-11092)
+
+ C++ Library:
+ ------------
+ - Add wrappers for H5Pset/get_file_locking() API calls
+
+ FileAccPropList::setFileLocking()
+ FileAccPropList::getFileLocking()
+
+ See the configure option discussion for HDFFV-11092 (above) for more
+ information on the file locking feature and how it's controlled.
+
+ (DER - 2020/07/30, HDFFV-11092)
+
+
+ Java Library:
+ -------------
+ - Replaced HDF5AtomException with HDF5IdException
+
+ Since H5E_ATOM changed to H5E_ID in the C library, the Java exception
+ that wraps the error category was also renamed. Its functionality
+ remains unchanged aside from the name.
+
+ (See also the HDFFV-11190 note in the C library section)
+
+ (DER - 2020/11/24, HDFFV-11190)
+
+ - Added new H5S functions.
+
+ H5Sselect_copy, H5Sselect_shape_same, H5Sselect_adjust,
+ H5Sselect_intersect_block, H5Sselect_project_intersection,
+ H5Scombine_hyperslab, H5Smodify_select, H5Scombine_select
+ wrapper functions added.
+
+ (ADB - 2020/10/27, HDFFV-10868)
+
+ - Add wrappers for H5Pset/get_file_locking() API calls
+
+ H5Pset_file_locking()
+ H5Pget_use_file_locking()
+ H5Pget_ignore_disabled_file_locking()
+
+ Unlike the C++ and Fortran wrappers, there are separate getters for the
+ two file locking settings, each of which returns a boolean value.
+
+ See the configure option discussion for HDFFV-11092 (above) for more
+ information on the file locking feature and how it's controlled.
+
+ (DER - 2020/07/30, HDFFV-11092)
+
+ - Added ability to test java library with VOLs.
+
+ Created a new CMake script that combines the java and vol test scripts.
+
+ (ADB - 2020/02/03, HDFFV-10996)
+
+ - Tests fail for non-English locales.
+
+ In the JUnit tests with a non-English locale, only the part before
+ the decimal comma is replaced by XXXX and this leads to a comparison
+ error. Changed the regex for the Time substitution.
+
+ (ADB - 2020/01/09, HDFFV-10995)
+
+
+ Tools:
+ ------
+ - h5repack added an optional verbose value for reporting R/W timing.
+
+ In addition to adding timing capture around the read/write calls in
+ h5repack, added help text to indicate how to show timing for read/write;
+ -v N, --verbose=N Verbose mode, print object information.
+ N - is an integer greater than 1, 2 displays read/write timing
+ (ADB - 2021/11/08)
+
+ - Added a new (unix ONLY) parallel meta tool 'h5dwalk', which utilizes the
+ mpifileutils (https://hpc.github.io/mpifileutils) open source utility
+ library to enable parallel execution of other HDF5 tools.
+ This approach can greatly enhance the serial hdf5 tool performance over large
+ collections of files by utilizing MPI parallelism to distribute an application
+ load over many independent MPI ranks and files.
+
+ An introduction to the mpifileutils library and initial 'User Guide' for
+ the new 'h5dwalk" tool can be found at:
+ https://github.com/HDFGroup/hdf5doc/tree/master/RFCs/HDF5/tools/parallel_tools
+
+ (RAW - 2021/10/25)
+
+ - Refactored the perform tools and removed depends on test library.
+
+ Moved the perf and h5perf tools from tools/test/perform to
+ tools/src/h5perf so that they can be installed. This required
+ that the test library dependency be removed by copying the
+ needed functions from h5test.c.
+ The standalone scripts and other perform tools remain in the
+ tools/test/perform folder.
+
+ (ADB - 2021/08/10)
+
+ - Removed partial long exceptions
+
+ Some of the tools accepted shortened versions of the long options
+ (ex: --datas instead of --dataset). These were implemented inconsistently,
+      are difficult to maintain, and occasionally block useful long option
+ names. These partial long options have been removed from all the tools.
+
+ (DER - 2021/08/03)
+
+ - h5repack added help text for user-defined filters.
+
+ Added help text line that states the valid values of the filter flag
+ for user-defined filters;
+ filter_flag: 1 is OPTIONAL or 0 is MANDATORY
+
+ (ADB - 2021/01/14, HDFFV-11099)
+
+ - Added h5delete tool
+
+ Deleting HDF5 storage when using the VOL can be tricky when the VOL
+ does not create files. The h5delete tool is a simple wrapper around
+ the H5Fdelete() API call that uses the VOL specified in the
+ HDF5_VOL_CONNECTOR environment variable to delete a "file". If
+ the call to H5Fdelete() fails, the tool will attempt to use
+ the POSIX remove(3) call to remove the file.
+
+ Note that the HDF5 library does currently have support for
+ H5Fdelete() in the native VOL connector.
+
+ (DER - 2020/12/16)
+
+ - h5repack added options to control how external links are handled.
+
+ Currently h5repack preserves external links and cannot copy and merge
+ data from the external files. Two options, merge and prune, were added to
+ control how to merge data from an external link into the resulting file.
+ --merge Follow external soft link recursively and merge data.
+ --prune Do not follow external soft links and remove link.
+ --merge --prune Follow external link, merge data and remove dangling link.
+
+ (ADB - 2020/08/05, HDFFV-9984)
+
+ - h5repack was fixed to repack the reference attributes properly.
+      The line of code that checks whether a reference inside a compound
+      datatype needs updating was misplaced outside the loop that carries
+      out the check. As a consequence, the next attribute that was not of
+      the reference type was repacked again as a reference type, which
+      caused repacking to fail. The fix moves the corresponding line of
+      code to the correct code block.
+
+ (KY -2020/02/07, HDFFV-11014)
+
+ - h5diff was updated to use the new reference APIs.
+
+ h5diff uses the new reference APIs to compare references.
+ Attribute references can also be compared.
+
+ (ADB - 2019/12/19, HDFFV-10980)
+
+ - h5dump and h5ls were updated to use the new reference APIs.
+
+ The tools library now use the new reference APIs to inspect a
+ file. Also the DDL spec was updated to reflect the format
+ changes produced with the new APIs. The export API and support
+ functions in the JNI were updated to match.
+
+ (ADB - 2019/12/06, HDFFV-10876 and HDFFV-10877)
+
+
+ High-Level APIs:
+ ----------------
+ - added set/get for unsigned long long attributes
+
+ The attribute writing high-level API has been expanded to include
+ public set/get functions for ULL attributes, analogously to the
+ existing set/get for other types.
+
+ (AF - 2021/09/08)
+
+ C Packet Table API:
+ -------------------
+ -
+
+ Internal header file:
+ ---------------------
+ -
+
+ Documentation:
+ --------------
+ -
+
+
+New platforms, languages and compilers tested
+=============================================
+ - Linux 5.13.14-200.fc34 #1 SMP x86_64 GNU/Linux Fedora34
+ - Linux 5.11.0-34-generic #36-Ubuntu SMP x86_64 GNU/Linux Ubuntu 20.04
+ - Linux 5.3.18-22-default #1 SMP x86_64 GNU/Linux SUSE15sp2
+ - Linux-4.14.0-115.21.2 #1 SMP ppc64le GNU/Linux
+ - Linux-4.12.14-150.75-default #1 SMP x86_64 GNU/Linux
+ - macOS Apple M1 11.6 Darwin 20.6.0 arm64
+ - macOS Big Sur 11.3.1 Darwin 20.4.0 x86_64
+ - clang versions 11.0.1, 12.0.5
+ - Visual Studio 2019 w/ clang 12.0.0 with MSVC-like command-line
+
+
+Bug Fixes since HDF5-1.12.0 release
+===================================
+ Library
+ -------
+ - Fixed an H5Pget_filter_by_id1/2() assert w/ out of range filter IDs
+
+ Both H5Pget_filter_by_id1 and 2 did not range check the filter ID, which
+      could trip an assert in debug versions of the library. The library now
+ returns a normal HDF5 error when the filter ID is out of range.
+
+ (DER - 2021/11/23, HDFFV-11286)
+
+ - Fixed an issue with collective metadata reads being permanently disabled
+ after a dataset chunk lookup operation. This would usually cause a
+ mismatched MPI_Bcast and MPI_ERR_TRUNCATE issue in the library for
+ simple cases of H5Dcreate() -> H5Dwrite() -> H5Dcreate().
+
+ (JTH - 2021/11/08, HDFFV-11090)
+
+ - Fixed cross platform incompatibility of references within variable length
+ types
+
+ Reference types within variable length types previously could not be
+ read on a platform with different endianness from where they were
+ written. Fixed so cross platform portability is restored.
+
+ (NAF - 2021/09/30)
+
+ - Detection of simple data transform function "x"
+
+ In the case of the simple data transform function "x" the (parallel)
+ library recognizes this is the same as not applying this data transform
+ function. This improves the I/O performance. In the case of the parallel
+ library, it also avoids breaking to independent I/O, which makes it
+ possible to apply a filter when writing or reading data to or from
+ the HDF5 file.
+
+ (JWSB - 2021/09/13)
+
+ - Fixed an invalid read and memory leak when parsing corrupt file space
+ info messages
+
+ When the corrupt file from CVE-2020-10810 was parsed by the library,
+ the code that imports the version 0 file space info object header
+ message to the version 1 struct could read past the buffer read from
+ the disk, causing an invalid memory read. Not catching this error would
+ cause downstream errors that eventually resulted in a previously
+ allocated buffer to be unfreed when the library shut down. In builds
+ where the free lists are in use, this could result in an infinite loop
+ and SIGABRT when the library shuts down.
+
+ We now track the buffer size and raise an error on attempts to read
+ past the end of it.
+
+ (DER - 2021/08/12, HDFFV-11053)
+
+
+ - Fixed CVE-2018-14460
+
+ The tool h5repack produced a segfault when the rank in dataspace
+ message was corrupted, causing invalid read while decoding the
+ dimension sizes.
+
+ The problem was fixed by ensuring that decoding the dimension sizes
+ and max values will not go beyond the end of the buffer.
+
+ (BMR - 2021/05/12, HDFFV-11223)
+
+ - Fixed CVE-2018-11206
+
+ The tool h5dump produced a segfault when the size of a fill value
+ message was corrupted and caused a buffer overflow.
+
+ The problem was fixed by verifying the fill value's size
+ against the buffer size before attempting to access the buffer.
+
+ (BMR - 2021/03/15, HDFFV-10480)
+
+ - Fixed CVE-2018-14033 (same issue as CVE-2020-10811)
+
+ The tool h5dump produced a segfault when the storage size message
+ was corrupted and caused a buffer overflow.
+
+ The problem was fixed by verifying the storage size against the
+ buffer size before attempting to access the buffer.
+
+ (BMR - 2021/03/15, HDFFV-11159/HDFFV-11049)
+
+ - Remove underscores on header file guards
+
+ Header file guards used a variety of underscores at the beginning of the define.
+
+ Removed all leading (some trailing) underscores from header file guards.
+
+ (ADB - 2021/03/03, #361)
+
+ - Fixed a segmentation fault
+
+ A segmentation fault occurred with a Mathworks corrupted file.
+
+ A detection of accessing a null pointer was added to prevent the problem.
+
+ (BMR - 2021/02/19, HDFFV-11150)
+
+ - Fixed issue with MPI communicator and info object not being
+ copied into new FAPL retrieved from H5F_get_access_plist
+
+ Added logic to copy the MPI communicator and info object into
+ the output FAPL. MPI communicator is retrieved from the VFD, while
+ the MPI info object is retrieved from the file's original FAPL.
+
+ (JTH - 2021/02/15, HDFFV-11109)
+
+ - Fixed problems with vlens and refs inside compound using
+ H5VLget_file_type()
+
+ Modified library to properly ref count H5VL_object_t structs and only
+ consider file vlen and reference types to be equal if their files are
+ the same.
+
+ (NAF - 2021/01/22)
+
+ - Fixed CVE-2018-17432
+
+ The tool h5repack produced a segfault on a corrupted file which had
+ invalid rank for scalar or NULL datatype.
+
+ The problem was fixed by modifying the dataspace encode and decode
+ functions to detect and report invalid rank. h5repack now fails
+ with an error message for the corrupted file.
+
+ (BMR - 2020/10/26, HDFFV-10590)
+
+ - Creation of dataset with optional filter
+
+ When the combination of type, space, etc doesn't work for filter
+ and the filter is optional, it was supposed to be skipped but it was
+ not skipped and the creation failed.
+
+ Allowed the creation of the dataset in such a situation.
+
+ (BMR - 2020/08/13, HDFFV-10933)
+
+ - Explicitly declared dlopen to use RTLD_LOCAL
+
+ dlopen documentation states that if neither RTLD_GLOBAL nor
+ RTLD_LOCAL are specified, then the default behavior is unspecified.
+ The default on linux is usually RTLD_LOCAL while macos will default
+ to RTLD_GLOBAL.
+
+ (ADB - 2020/08/12, HDFFV-11127)
+
+ - H5Sset_extent_none() sets the dataspace class to H5S_NO_CLASS which
+ causes asserts/errors when passed to other dataspace API calls.
+
+ H5S_NO_CLASS is an internal class value that should not have been
+ exposed via a public API call.
+
+ In debug builds of the library, this can cause assert() function to
+ trip. In non-debug builds, it will produce normal library errors.
+
+ The new library behavior is for H5Sset_extent_none() to convert
+ the dataspace into one of type H5S_NULL, which is better handled
+ by the library and easier for developers to reason about.
+
+ (DER - 2020/07/27, HDFFV-11027)
+
+ - Fixed issues CVE-2018-13870 and CVE-2018-13869
+
+ When a buffer overflow occurred because a name length was corrupted
+ and became very large, h5dump crashed on memory access violation.
+
+      A check for reading past the end of the buffer was added to multiple
+ locations to prevent the crashes and h5dump now simply fails with an
+ error message when this error condition occurs.
+
+ (BMR - 2020/07/22, HDFFV-11120 and HDFFV-11121)
+
+ - Fixed the segmentation fault when reading attributes with multiple threads
+
+ It was reported that the reading of attributes with variable length string
+ datatype will crash with segmentation fault particularly when the number of
+ threads is high (>16 threads). The problem was due to the file pointer that
+ was set in the variable length string datatype for the attribute. That file
+ pointer was already closed when the attribute was accessed.
+
+ The problem was fixed by setting the file pointer to the current opened file pointer
+      when the attribute was accessed. A similar fix was made previously for
+      reading datasets with a variable length string datatype.
+
+ (VC - 2020/07/13, HDFFV-11080)
+
+ - Fixed CVE-2020-10810
+
+ The tool h5clear produced a segfault during an error recovery in
+ the superblock decoding. An internal pointer was reset to prevent
+ further accessing when it is not assigned with a value.
+
+ (BMR - 2020/06/29, HDFFV-11053)
+
+ - Fixed CVE-2018-17435
+
+ The tool h52gif produced a segfault when the size of an attribute
+ message was corrupted and caused a buffer overflow.
+
+ The problem was fixed by verifying the attribute message's size
+ against the buffer size before accessing the buffer. h52gif was
+ also fixed to display the failure instead of silently exiting
+ after the segfault was eliminated.
+
+ (BMR - 2020/06/19, HDFFV-10591)
+
+
+ Java Library
+ ------------
+ - JNI utility function does not handle new references.
+
+ The JNI utility function for converting reference data to string did
+ not use the new APIs. In addition to fixing that function, added new
+ java tests for using the new APIs.
+
+ (ADB - 2021/02/16, HDFFV-11212)
+
+    - Improved performance of the H5FArray.java class, in which virtually the
+      entire execution time is spent using the HDFNativeData method that
+      converts from an array of bytes to an array of the destination Java type.
+
+ 1. Convert the entire byte array into a 1-d array of the desired type,
+ rather than performing 1 conversion per row;
+ 2. Use the Java Arrays method copyOfRange to grab the section of the
+ array from (1) that is desired to be inserted into the destination array.
+
+ (PGT,ADB - 2020/12/13, HDFFV-10865)
+
+ - Added ability to test java library with VOLs.
+
+ Created a new CMake script that combines the java and vol test scripts.
+
+ (ADB - 2020/02/03, HDFFV-10996)
+
+ - Tests fail for non-English locales.
+
+ In the JUnit tests with a non-English locale, only the part before
+ the decimal comma is replaced by XXXX and this leads to a comparison
+ error. Changed the regex for the Time substitution.
+
+ (ADB - 2020/01/09, HDFFV-10995)
+
+
+ Configuration
+ -------------
+ - Corrected path searched by CMake find_package command
+
+ The install path for cmake find_package files had been changed to use
+ "share/cmake"
+ for all platforms. However the trailing "hdf5" directory was not removed.
+ This "hdf5" additional directory has been removed.
+
+ (ADB - 2021/09/27)
+
+ - Corrected pkg-config compile script
+
+      It was discovered that the position of the "$@" argument for the command
+      in the compile script may fail on some platforms and configurations. The
+      position of the "$@" command argument was moved before the pkg-config
+      subcommand.
+
+ (ADB - 2021/08/30)
+
+ - Fixed CMake C++ compiler flags
+
+ A recent refactoring of the C++ configure files accidentally removed the
+ file that executed the enable_language command for C++ needed by the
+ HDFCXXCompilerFlags.cmake file. Also updated the intel warnings files,
+ including adding support for windows platforms.
+
+ (ADB - 2021/08/10)
+
+ - Better support for libaec (open-source Szip library) in CMake
+
+ Implemented better support for libaec 1.0.5 (or later) library. This version
+ of libaec contains improvements for better integration with HDF5. Furthermore,
+      the variable USE_LIBAEC_STATIC has been introduced to allow use of the
+      static version of the libaec library. Use libaec_DIR or libaec_ROOT to set
+ the location in which libaec can be found.
+
+ Be aware, the Szip library of libaec 1.0.4 depends on another library within
+ libaec library. This dependency is not specified in the current CMake
+ configuration which means that one can not use the static Szip library of
+ libaec 1.0.4 when building HDF5. This has been resolved in libaec 1.0.5.
+
+ (JWSB - 2021/06/22)
+
+ - Refactor CMake configure for Fortran
+
+ The Fortran configure tests for KINDs reused a single output file that was
+ read to form the Integer and Real Kinds defines. However, if config was run
+      more than once, the CMake completed variable prevented the tests from executing
+ again and the last value saved in the file was used to create the define.
+ Creating separate files for each KIND solved the issue.
+
+ In addition the test for H5_PAC_C_MAX_REAL_PRECISION was not pulling in
+ defines for proper operation and did not define H5_PAC_C_MAX_REAL_PRECISION
+ correctly for a zero value. This was fixed by supplying the required defines.
+      In addition it was moved from the Fortran specific HDF5UseFortran.cmake file
+ to the C centric ConfigureChecks.cmake file.
+
+ (ADB - 2021/06/03)
+
+ - Move emscripten flag to compile flags
+
+ The emscripten flag, -O0, was removed from target_link_libraries command
+ to the correct target_compile_options command.
+
+ (ADB - 2021/04/26 HDFFV-11083)
+
+ - Remove arbitrary warning flag groups from CMake builds
+
+ The arbitrary groups were created to reduce the quantity of warnings being
+ reported that overwhelmed testing report systems. Considerable work has
+ been accomplished to reduce the warning count and these arbitrary groups
+ are no longer needed.
+ Also the default for all warnings, HDF5_ENABLE_ALL_WARNINGS, is now ON.
+
+ Visual Studio warnings C4100, C4706, and C4127 have been moved to
+ developer warnings, HDF5_ENABLE_DEV_WARNINGS, and are disabled for normal builds.
+
+ (ADB - 2021/03/22, HDFFV-11228)
+
+ - Reclassify CMake messages, to allow new modes and --log-level option
+
+ CMake message commands have a mode argument. By default, STATUS mode
+ was chosen for any non-error message. CMake version 3.15 added additional
+ modes, NOTICE, VERBOSE, DEBUG and TRACE. All message commands with a mode
+ of STATUS were reviewed and most were reclassified as VERBOSE. The new
+ mode was protected by a check for a CMake version of at least 3.15. If CMake
+ version 3.17 or above is used, the user can use the command line option
+ of "--log-level" to further restrict which message commands are displayed.
+
+ (ADB - 2021/01/11, HDFFV-11144)
+
+ - Fixes Autotools determination of the stat struct having an st_blocks field
+
+ A missing parenthesis in an autoconf macro prevented building the test
+ code used to determine if the stat struct contains the st_blocks field.
+ Now that the test functions correctly, the H5_HAVE_STAT_ST_BLOCKS #define
+ found in H5pubconf.h will be defined correctly on both the Autotools and
+ CMake. This #define is only used in the tests and does not affect the
+ HDF5 C library.
+
+ (DER - 2021/01/07, HDFFV-11201)
+
+ - Add missing ENV variable line to hdfoptions.cmake file
+
+ Using the build options to use system SZIP/ZLIB libraries need to also
+ specify the library root directory. Setting the {library}_ROOT ENV
+ variable was added to the hdfoptions.cmake file.
+
+ (ADB - 2020/10/19 HDFFV-11108)
+
+
+ Tools
+ -----
+ - Changed how h5dump and h5ls identify long double.
+
+ Long double support is not consistent across platforms. Tools will always
+ identify long double as 128-bit [little/big]-endian float nn-bit precision.
+ New test file created for datasets with attributes for float, double and
+ long double. In addition any unknown integer or float datatype will now
+ also show the number of bits for precision.
+ These files are also used in the java tests.
+
+ (ADB - 2021/03/24, HDFFV-11229,HDFFV-11113)
+
+ - Fixed tools argument parsing.
+
+ Tools parsing used the length of the option from the long array to match
+ the option from the command line. This incorrectly matched a shorter long
+ name option that happened to be a subset of another long option.
+ Changed to match whole names.
+
+ (ADB - 2021/01/19, HDFFV-11106)
+
+ - The tools library was updated by standardizing the error stack process.
+
+ General sequence is:
+ h5tools_setprogname(PROGRAMNAME);
+ h5tools_setstatus(EXIT_SUCCESS);
+ h5tools_init();
+ ... process the command-line (check for error-stack enable) ...
+ h5tools_error_report();
+ ... (do work) ...
+ h5diff_exit(ret);
+
+ (ADB - 2020/07/20, HDFFV-11066)
+
+ - h5diff fixed a command line parsing error.
+
+ h5diff would ignore the argument to -d (delta) if it is smaller than DBL_EPSILON.
+ The macro H5_DBL_ABS_EQUAL was removed and a direct value comparison was used.
+
+ (ADB - 2020/07/20, HDFFV-10897)
+
+ - h5diff added a command line option to ignore attributes.
+
+ h5diff would ignore all objects with a supplied path if the exclude-path argument is used.
+ Adding the exclude-attribute argument will only exclude attributes, with the supplied path,
+ from comparison.
+
+ (ADB - 2020/07/20, HDFFV-5935)
+
+ - h5diff added another level to the verbose argument to print filenames.
+
+ Added verbose level 3 that is level 2 plus the filenames. The levels are:
+ 0 : Identical to '-v' or '--verbose'
+ 1 : All level 0 information plus one-line attribute status summary
+ 2 : All level 1 information plus extended attribute status report
+ 3 : All level 2 information plus file names
+
+ (ADB - 2020/07/20, HDFFV-1005)
+
+ - h5repack was fixed to repack the reference attributes properly.
+
+                        The line of code that checks whether a reference inside a compound
+                        datatype needs updating was misplaced outside the code block loop
+                        that carries out the check. As a consequence, the next attribute
+                        that was not of reference type was repacked again as a reference
+                        type, causing the repacking to fail. The fix moves the line into
+                        the correct code block.
+
+                        (KY - 2020/02/10, HDFFV-11014)
+
+ - h5diff was updated to use the new reference APIs.
+
+ h5diff uses the new reference APIs to compare references.
+ Attribute references can also be compared.
+
+ (ADB - 2019/12/19, HDFFV-10980)
+
+ - h5dump and h5ls were updated to use the new reference APIs.
+
+ The tools library now use the new reference APIs to inspect a
+ file. Also the DDL spec was updated to reflect the format
+ changes produced with the new APIs. The export API and support
+ functions in the JNI were updated to match.
+
+ (ADB - 2019/12/06, HDFFV-10876 and HDFFV-10877)
+
+
+ Performance
+ -------------
+ -
+
+
+ Fortran API
+ -----------
+ - Corrected INTERFACE INTENT(IN) to INTENT(OUT) for buf_size in h5fget_file_image_f.
+
+ (MSB - 2020/02/18, HDFFV-11029)
+
+
+ High-Level Library
+ ------------------
+ - Fixed HL_test_packet, test for packet table vlen of vlen.
+
+ Incorrect length assignment.
+
+ (ADB - 2021/10/14)
+
+
+ Fortran High-Level APIs
+ -----------------------
+ -
+
+
+ Documentation
+ -------------
+ -
+
+
+ F90 APIs
+ --------
+ -
+
+
+ C++ APIs
+ --------
+ - Added DataSet::operator=
+
+ Some compilers complain if the copy constructor is given explicitly
+ but the assignment operator is implicitly set to default.
+
+ (2021/05/19)
+
+
+ Testing
+ -------
+ - Stopped java/test/junit.sh.in installing libs for testing under ${prefix}
+
+ Lib files needed are now copied to a subdirectory in the java/test
+ directory, and on Macs the loader path for libhdf5.xxxs.so is changed
+ in the temporary copy of libhdf5_java.dylib.
+
+ (LRK, 2020/07/02, HDFFV-11063)
+
+
+Platforms Tested
+===================
+
+ Linux 5.13.14-200.fc34 GNU gcc (GCC) 11.2.1 2021078 (Red Hat 11.2.1-1)
+ #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 2021078 (Red Hat 11.2.1-1)
+ Fedora34 clang version 12.0.1 (Fedora 12.0.1-1.fc34)
+ (cmake and autotools)
+
+ Linux 5.11.0-34-generic GNU gcc (GCC) 9.3.0-17ubuntu1
+ #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.3.0-17ubuntu1
+ Ubuntu 20.04 Ubuntu clang version 10.0.0-4
+ (cmake and autotools)
+
+ Linux 5.8.0-63-generic GNU gcc (GCC) 10.3.0-1ubuntu1
+ #71-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 10.3.0-1ubuntu1
+ Ubuntu20.10 Ubuntu clang version 11.0.0-2
+ (cmake and autotools)
+
+ Linux 5.3.18-22-default GNU gcc (SUSE Linux) 7.5.0
+ #1 SMP x86_64 GNU/Linux GNU Fortran (SUSE Linux) 7.5.0
+ SUSE15sp2 clang version 7.0.1 (tags/RELEASE_701/final 349238)
+ (cmake and autotools)
+
+ Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release
+ #1 SMP ppc64le GNU/Linux clang 8.0.1, 11.0.1
+ (lassen) GCC 7.3.1
+ XL 16.1.1.2
+ (cmake)
+
+ Linux-4.12.14-150.75-default cray-mpich/7.7.10
+ #1 SMP x86_64 GNU/Linux GCC 7.3.0, 8.2.0
+ (cori) Intel (R) Version 19.0.3.199
+ (cmake)
+
+ Linux-4.12.14-197.86-default cray-mpich/7.7.6
+    #1 SMP x86_64 GNU/Linux        GCC 7.3.0, 9.3.0, 10.2.0
+ (mutrino) Intel (R) Version 17.0.4, 18.0.5, 19.1.3
+ (cmake)
+
+ Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+ #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+ Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+
+ Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0,
+ Version 7.2.0, Version 8.3.0, Version 9.1.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 17.0.0.098 Build 20160721
+ GNU C (gcc) and C++ (g++) 4.8.5 compilers
+ with NAG Fortran Compiler Release 6.1(Tozai)
+ Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers
+ with NAG Fortran Compiler Release 6.1(Tozai)
+ MPICH 3.1.4 compiled with GCC 4.9.3
+ MPICH 3.3 compiled with GCC 7.2.0
+ OpenMPI 2.1.6 compiled with icc 18.0.1
+ OpenMPI 3.1.3 and 4.0.0 compiled with GCC 7.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86_64;
+ Version 19.10-0
+
+ Linux-3.10.0-1127.0.0.1chaos openmpi-4.0.0
+ #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1
+ (quartz) GCC 7.3.0, 8.1.0
+ Intel 16.0.4, 18.0.2, 19.0.4
+
+ macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11)
+ Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0
+    (macmini-m1)               Intel icc/icpc/ifort version 2021.3.0 20210609
+
+ macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9)
+ Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0
+ (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228
+
+ macOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang-1000.10.44.4)
+ 64-bit gfortran GNU Fortran (GCC) 6.3.0
+ (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416
+
+ macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2)
+ 64-bit gfortran GNU Fortran (GCC) 7.4.0
+ (kite) Intel icc/icpc/ifort version 17.0.2
+
+ Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1011test) Intel icc/icpc/ifort version 16.0.2
+
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ Centos6 Version 4.4.7 20120313
+ (platypus) Version 4.9.3, 5.3.0, 6.2.0
+ MPICH 3.1.4 compiled with GCC 4.9.3
+ PGI C, Fortran, C++ for 64-bit target on
+ x86_64;
+ Version 19.10-0
+
+ Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake)
+ Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake)
+ Visual Studio 2019 w/ clang 12.0.0
+ with MSVC-like command-line (C/C++ only - cmake)
+ Visual Studio 2019 w/ Intel Fortran 19 (cmake)
+ Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake)
+
+
+Known Problems
+==============
+ Setting a variable-length dataset fill value will leak the memory allocated
+ for the p field of the hvl_t struct. A fix is in progress for this.
+ HDFFV-10840
+
+ CMake files do not behave correctly with paths containing spaces.
+ Do not use spaces in paths because the required escaping for handling spaces
+ results in very complex and fragile build files.
+ ADB - 2019/05/07
+
+ At present, metadata cache images may not be generated by parallel
+ applications. Parallel applications can read files with metadata cache
+ images, but since this is a collective operation, a deadlock is possible
+ if one or more processes do not participate.
+
+ CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA
+ issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler.
+
+ The subsetting option in ph5diff currently will fail and should be avoided.
+ The subsetting option works correctly in serial h5diff.
+
+ Known problems in previous releases can be found in the HISTORY*.txt files
+ in the HDF5 source. Please report any new problems found to
+ help@hdfgroup.org.
+
+
+CMake vs. Autotools installations
+=================================
+While both build systems produce similar results, there are differences.
+Each system produces the same set of folders on linux (only CMake works
+on standard Windows); bin, include, lib and share. Autotools places the
+COPYING and RELEASE.txt file in the root folder, CMake places them in
+the share folder.
+
+The bin folder contains the tools and the build scripts. Additionally, CMake
+creates dynamic versions of the tools with the suffix "-shared". Autotools
+installs one set of tools depending on the "--enable-shared" configuration
+option.
+ build scripts
+ -------------
+ Autotools: h5c++, h5cc, h5fc
+ CMake: h5c++, h5cc, h5hlc++, h5hlcc
+
+The include folder holds the header files and the fortran mod files. CMake
+places the fortran mod files into separate shared and static subfolders,
+while Autotools places one set of mod files into the include folder. Because
+CMake produces a tools library, the header files for tools will appear in
+the include folder.
+
+The lib folder contains the library files, and CMake adds the pkgconfig
+subfolder with the hdf5*.pc files used by the bin/build scripts created by
+the CMake build. CMake separates the C interface code from the fortran code by
+creating C-stub libraries for each Fortran library. In addition, only CMake
+installs the tools library. The names of the szip libraries are different
+between the build systems.
+
+The share folder will have the most differences because CMake builds include
+a number of CMake specific files for support of CMake's find_package and support
+for the HDF5 Examples CMake project.
+
+The issues with the gif tool are:
+ HDFFV-10592 CVE-2018-17433
+ HDFFV-10593 CVE-2018-17436
+ HDFFV-11048 CVE-2020-10809
+These CVE issues have not yet been addressed and can be avoided by not building
+the gif tool. Disable building the High-Level tools with these options:
+ autotools: --disable-hltools
+ cmake: HDF5_BUILD_HL_TOOLS=OFF
+
+
+%%%%1.13.1%%%%
+
+HDF5 version 1.13.1 released on 2022-03-02
+================================================================================
+
+
+INTRODUCTION
+============
+
+This document describes the differences between this release and the previous
+HDF5 release. It contains information on the platforms tested and known
+problems in this release. For more details check the HISTORY*.txt files in the
+HDF5 source.
+
+Note that documentation in the links below will be updated at the time of each
+final release.
+
+Links to HDF5 documentation can be found on The HDF5 web page:
+
+ https://portal.hdfgroup.org/display/HDF5/HDF5
+
+The official HDF5 releases can be obtained from:
+
+ https://www.hdfgroup.org/downloads/hdf5/
+
+Changes from Release to Release and New Features in the HDF5-1.13.x release series
+can be found at:
+
+ https://portal.hdfgroup.org/display/HDF5/HDF5+Application+Developer%27s+Guide
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for new platforms and languages
+- Bug Fixes since HDF5-1.13.0
+- Platforms Tested
+- Known Problems
+- CMake vs. Autotools installations
+
+
+New Features
+============
+
+ Configuration:
+ -------------
+      - Reworked and corrected the path searched by the CMake find_package command
+
+ The install path for cmake find_package files had been changed to use
+ "share/cmake"
+        for all platforms. However, setting the HDF5_ROOT variable failed to locate
+ the configuration files. The build variable HDF5_INSTALL_CMAKE_DIR is now
+ set to the <INSTALL_DIR>/cmake folder. The location of the configuration
+ files can still be specified by the "HDF5_DIR" variable.
+
+ (ADB - 2022/02/02)
+
+ - CPack will now generate RPM/DEB packages.
+
+ Enabled the RPM and DEB CPack generators on linux. In addition to
+ generating STGZ and TGZ packages, CPack will try to package the
+ library for RPM and DEB packages. This is the initial attempt and
+ may change as issues are resolved.
+
+ (ADB - 2022/01/27)
+
+ - Added new option to the h5cc scripts produced by CMake.
+
+ Add -showconfig option to h5cc scripts to cat the
+ libhdf5.settings file to the standard output.
+
+ (ADB - 2022/01/25)
+
+ - CMake will now run the PowerShell script tests in test/ by default
+ on Windows.
+
+ The test directory includes several shell script tests that previously
+ were not run by CMake on Windows. These are now run by default.
+ If TEST_SHELL_SCRIPTS is ON and PWSH is found, the PowerShell scripts
+ will execute. Similar to the bash scripts on unix platforms.
+
+ (ADB - 2021/11/23)
+
+
+ Library:
+ --------
+ - Add a new public function, H5ESget_requests()
+
+ This function allows the user to retrieve request pointers from an event
+ set. It is intended for use primarily by VOL plugin developers.
+
+ (NAF - 2022/01/11)
+
+
+ Parallel Library:
+ -----------------
+ - Several improvements to parallel compression feature, including:
+
+ * Improved support for collective I/O (for both writes and reads)
+
+ * Significant reduction of memory usage for the feature as a whole
+
+ * Reduction of copying of application data buffers passed to H5Dwrite
+
+ * Addition of support for incremental file space allocation for filtered
+ datasets created in parallel. Incremental file space allocation is the
+ default for these types of datasets (early file space allocation is
+ also still supported), while early file space allocation is still the
+ default (and only supported at allocation time) for unfiltered datasets
+ created in parallel. Incremental file space allocation should help with
+ parallel HDF5 applications that wish to use fill values on filtered
+ datasets, but would typically avoid doing so since dataset creation in
+ parallel would often take an excessive amount of time. Since these
+ datasets previously used early file space allocation, HDF5 would
+ allocate space for and write fill values to every chunk in the dataset
+ at creation time, leading to noticeable overhead. Instead, with
+ incremental file space allocation, allocation of file space for chunks
+ and writing of fill values to those chunks will be delayed until each
+ individual chunk is initially written to.
+
+ * Addition of support for HDF5's "don't filter partial edge chunks" flag
+ (https://portal.hdfgroup.org/display/HDF5/H5P_SET_CHUNK_OPTS)
+
+ * Addition of proper support for HDF5 fill values with the feature
+
+ * Addition of 'H5_HAVE_PARALLEL_FILTERED_WRITES' macro to H5pubconf.h
+ so HDF5 applications can determine at compile-time whether the feature
+ is available
+
+ * Addition of simple examples (ph5_filtered_writes.c and
+ ph5_filtered_writes_no_sel.c) under examples directory to demonstrate
+ usage of the feature
+
+ * Improved coverage of regression testing for the feature
+
+ (JTH - 2022/2/23)
+
+
+Support for new platforms, languages and compilers
+==================================================
+ - None
+
+
+Bug Fixes since HDF5-1.13.0 release
+===================================
+ Library
+ -------
+ - Fixed a metadata cache bug when resizing a pinned/protected cache entry
+
+ When resizing a pinned/protected cache entry, the metadata
+ cache code previously would wait until after resizing the
+ entry to attempt to log the newly-dirtied entry. This
+ caused H5C_resize_entry to mark the entry as dirty and made
+ H5AC_resize_entry think that it didn't need to add the
+ newly-dirtied entry to the dirty entries skiplist.
+
+ Thus, a subsequent H5AC__log_moved_entry would think it
+ needed to allocate a new entry for insertion into the dirty
+                      entry skip list, since the entry didn't exist on that list.
+ This caused an assertion failure, as the code to allocate a
+ new entry assumes that the entry is not dirty.
+
+ (JRM - 2022/02/28)
+
+ - Issue #1436 identified a problem with the H5_VERS_RELEASE check in the
+ H5check_version function.
+
+ Investigating the original fix, #812, we discovered some inconsistencies
+ with a new block added to check H5_VERS_RELEASE for incompatibilities.
+ This new block was not using the new warning text dealing with the
+ H5_VERS_RELEASE check and would cause the warning to be duplicated.
+
+ By removing the H5_VERS_RELEASE argument in the first check for
+ H5_VERS_MAJOR and H5_VERS_MINOR, the second check would only check
+ the H5_VERS_RELEASE for incompatible release versions. This adheres
+ to the statement that except for the develop branch, all release versions
+ in a major.minor maintenance branch should be compatible. The prerequisite
+ is that an application will not use any APIs not present in all release versions.
+
+ (ADB - 2022/02/24, #1438)
+
+ - Unified handling of collective metadata reads to correctly fix old bugs
+
+ Due to MPI-related issues occurring in HDF5 from mismanagement of the
+ status of collective metadata reads, they were forced to be disabled
+ during chunked dataset raw data I/O in the HDF5 1.10.5 release. This
+ wouldn't generally have affected application performance because HDF5
+ already disables collective metadata reads during chunk lookup, since
+ it is generally unlikely that the same chunks will be read by all MPI
+ ranks in the I/O operation. However, this was only a partial solution
+ that wasn't granular enough.
+
+ This change now unifies the handling of the file-global flag and the
+ API context-level flag for collective metadata reads in order to
+ simplify querying of the true status of collective metadata reads. Thus,
+ collective metadata reads are once again enabled for chunked dataset
+ raw data I/O, but manually controlled at places where some processing
+ occurs on MPI rank 0 only and would cause issues when collective
+ metadata reads are enabled.
+
+ (JTH - 2021/11/16, HDFFV-10501/HDFFV-10562)
+
+ - Fixed several potential MPI deadlocks in library failure conditions
+
+ In the parallel library, there were several places where MPI rank 0
+ could end up skipping past collective MPI operations when some failure
+ occurs in rank 0-specific processing. This would lead to deadlocks
+ where rank 0 completes an operation while other ranks wait in the
+ collective operation. These places have been rewritten to have rank 0
+ push an error and try to cleanup after the failure, then continue to
+ participate in the collective operation to the best of its ability.
+
+ (JTH - 2021/11/09)
+
+
+Platforms Tested
+===================
+
+ Linux 5.13.14-200.fc34 GNU gcc (GCC) 11.2.1 2021078 (Red Hat 11.2.1-1)
+ #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 11.2.1 2021078 (Red Hat 11.2.1-1)
+ Fedora34 clang version 12.0.1 (Fedora 12.0.1-1.fc34)
+ (cmake and autotools)
+
+ Linux 5.11.0-34-generic GNU gcc (GCC) 9.3.0-17ubuntu1
+ #36-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 9.3.0-17ubuntu1
+ Ubuntu 20.04 Ubuntu clang version 10.0.0-4
+ (cmake and autotools)
+
+ Linux 5.8.0-63-generic GNU gcc (GCC) 10.3.0-1ubuntu1
+ #71-Ubuntu SMP x86_64 GNU/Linux GNU Fortran (GCC) 10.3.0-1ubuntu1
+ Ubuntu20.10 Ubuntu clang version 11.0.0-2
+ (cmake and autotools)
+
+ Linux 5.3.18-22-default GNU gcc (SUSE Linux) 7.5.0
+ #1 SMP x86_64 GNU/Linux GNU Fortran (SUSE Linux) 7.5.0
+ SUSE15sp2 clang version 7.0.1 (tags/RELEASE_701/final 349238)
+ (cmake and autotools)
+
+ Linux-4.14.0-115.21.2 spectrum-mpi/rolling-release
+ #1 SMP ppc64le GNU/Linux clang 8.0.1, 11.0.1
+ (lassen) GCC 7.3.1
+ XL 16.1.1.2
+ (cmake)
+
+ Linux-3.10.0-1160.49.1 openmpi-intel/4.1
+ #1 SMP x86_64 GNU/Linux Intel(R) Version 18.0.5, 19.1.2
+ (chama) (cmake)
+
+ Linux-4.12.14-150.75-default cray-mpich/7.7.10
+ #1 SMP x86_64 GNU/Linux GCC 7.3.0, 8.2.0
+ (cori) Intel (R) Version 19.0.3.199
+ (cmake)
+
+ Linux-4.12.14-197.86-default cray-mpich/7.7.6
+    #1 SMP x86_64 GNU/Linux        GCC 7.3.0, 9.3.0, 10.2.0
+ (mutrino) Intel (R) Version 17.0.4, 18.0.5, 19.1.3
+ (cmake)
+
+ Linux 3.10.0-1160.36.2.el7.ppc64 gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+ #1 SMP ppc64be GNU/Linux g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+ Power8 (echidna) GNU Fortran (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39)
+
+ Linux 3.10.0-1160.24.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ Centos7 Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ (jelly/kituo/moohan) Version 4.9.3, Version 5.3.0, Version 6.3.0,
+ Version 7.2.0, Version 8.3.0, Version 9.1.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 17.0.0.098 Build 20160721
+ GNU C (gcc) and C++ (g++) 4.8.5 compilers
+ with NAG Fortran Compiler Release 6.1(Tozai)
+ Intel(R) C (icc) and C++ (icpc) 17.0.0.098 compilers
+ with NAG Fortran Compiler Release 6.1(Tozai)
+ MPICH 3.1.4 compiled with GCC 4.9.3
+ MPICH 3.3 compiled with GCC 7.2.0
+ OpenMPI 2.1.6 compiled with icc 18.0.1
+ OpenMPI 3.1.3 and 4.0.0 compiled with GCC 7.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86_64;
+ Version 19.10-0
+
+ Linux-3.10.0-1127.0.0.1chaos openmpi-4.0.0
+ #1 SMP x86_64 GNU/Linux clang 6.0.0, 11.0.1
+ (quartz) GCC 7.3.0, 8.1.0
+ Intel 16.0.4, 18.0.2, 19.0.4
+
+ macOS Apple M1 11.6 Apple clang version 12.0.5 (clang-1205.0.22.11)
+ Darwin 20.6.0 arm64 gfortran GNU Fortran (Homebrew GCC 11.2.0) 11.1.0
+    (macmini-m1)               Intel icc/icpc/ifort version 2021.3.0 20210609
+
+ macOS Big Sur 11.3.1 Apple clang version 12.0.5 (clang-1205.0.22.9)
+ Darwin 20.4.0 x86_64 gfortran GNU Fortran (Homebrew GCC 10.2.0_3) 10.2.0
+ (bigsur-1) Intel icc/icpc/ifort version 2021.2.0 20210228
+
+ macOS High Sierra 10.13.6 Apple LLVM version 10.0.0 (clang-1000.10.44.4)
+ 64-bit gfortran GNU Fortran (GCC) 6.3.0
+ (bear) Intel icc/icpc/ifort version 19.0.4.233 20190416
+
+ macOS Sierra 10.12.6 Apple LLVM version 9.0.0 (clang-900.39.2)
+ 64-bit gfortran GNU Fortran (GCC) 7.4.0
+ (kite) Intel icc/icpc/ifort version 17.0.2
+
+ Mac OS X El Capitan 10.11.6 Apple clang version 7.3.0 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1011test) Intel icc/icpc/ifort version 16.0.2
+
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ Centos6 Version 4.4.7 20120313
+ (platypus) Version 4.9.3, 5.3.0, 6.2.0
+ MPICH 3.1.4 compiled with GCC 4.9.3
+ PGI C, Fortran, C++ for 64-bit target on
+ x86_64;
+ Version 19.10-0
+
+ Windows 10 x64 Visual Studio 2015 w/ Intel C/C++/Fortran 18 (cmake)
+ Visual Studio 2017 w/ Intel C/C++/Fortran 19 (cmake)
+ Visual Studio 2019 w/ clang 12.0.0
+ with MSVC-like command-line (C/C++ only - cmake)
+ Visual Studio 2019 w/ Intel Fortran 19 (cmake)
+ Visual Studio 2019 w/ MSMPI 10.1 (C only - cmake)
+
+
+Known Problems
+==============
+ Setting a variable-length dataset fill value will leak the memory allocated
+ for the p field of the hvl_t struct. A fix is in progress for this.
+ HDFFV-10840
+
+ CMake files do not behave correctly with paths containing spaces.
+ Do not use spaces in paths because the required escaping for handling spaces
+ results in very complex and fragile build files.
+ ADB - 2019/05/07
+
+ At present, metadata cache images may not be generated by parallel
+ applications. Parallel applications can read files with metadata cache
+ images, but since this is a collective operation, a deadlock is possible
+ if one or more processes do not participate.
+
+ CPP ptable test fails on both VS2017 and VS2019 with Intel compiler, JIRA
+ issue: HDFFV-10628. This test will pass with VS2015 with Intel compiler.
+
+ The subsetting option in ph5diff currently will fail and should be avoided.
+ The subsetting option works correctly in serial h5diff.
+
+ Known problems in previous releases can be found in the HISTORY*.txt files
+ in the HDF5 source. Please report any new problems found to
+ help@hdfgroup.org.
+
+
+CMake vs. Autotools installations
+=================================
+While both build systems produce similar results, there are differences.
+Each system produces the same set of folders on linux (only CMake works
+on standard Windows); bin, include, lib and share. Autotools places the
+COPYING and RELEASE.txt file in the root folder, CMake places them in
+the share folder.
+
+The bin folder contains the tools and the build scripts. Additionally, CMake
+creates dynamic versions of the tools with the suffix "-shared". Autotools
+installs one set of tools depending on the "--enable-shared" configuration
+option.
+ build scripts
+ -------------
+ Autotools: h5c++, h5cc, h5fc
+ CMake: h5c++, h5cc, h5hlc++, h5hlcc
+
+The include folder holds the header files and the fortran mod files. CMake
+places the fortran mod files into separate shared and static subfolders,
+while Autotools places one set of mod files into the include folder. Because
+CMake produces a tools library, the header files for tools will appear in
+the include folder.
+
+The lib folder contains the library files, and CMake adds the pkgconfig
+subfolder with the hdf5*.pc files used by the bin/build scripts created by
+the CMake build. CMake separates the C interface code from the fortran code by
+creating C-stub libraries for each Fortran library. In addition, only CMake
+installs the tools library. The names of the szip libraries are different
+between the build systems.
+
+The share folder will have the most differences because CMake builds include
+a number of CMake specific files for support of CMake's find_package and support
+for the HDF5 Examples CMake project.
+
+The issues with the gif tool are:
+ HDFFV-10592 CVE-2018-17433
+ HDFFV-10593 CVE-2018-17436
+ HDFFV-11048 CVE-2020-10809
+These CVE issues have not yet been addressed and can be avoided by not building
+the gif tool. Disable building the High-Level tools with these options:
+ autotools: --disable-hltools
+ cmake: HDF5_BUILD_HL_TOOLS=OFF
diff --git a/release_docs/HISTORY-1_8_0-1_10_0.txt b/release_docs/HISTORY-1_8_0-1_10_0.txt
index 575d070..7b84fbc 100644
--- a/release_docs/HISTORY-1_8_0-1_10_0.txt
+++ b/release_docs/HISTORY-1_8_0-1_10_0.txt
@@ -1581,7 +1581,7 @@ Known Problems
causes failures in several HDF5 library tests.
* For HPUX 11.23 many tools tests failed for 64-bit version when linked to the
shared libraries (tested for 1.8.0-beta2)
-* For SNL, Red Storm: only paralle HDF5 is supported. The serial tests pass
+* For SNL, Red Storm: only parallel HDF5 is supported. The serial tests pass
and the parallel tests also pass with lots of non-fatal error messages.
* on SUN 5.10 C++ test fails in the "Testing Shared Datatypes with Attributes" test
* configuring with --enable-debug=all produces compiler errors on most
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index fc7d4dd..5e488be 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -1,4 +1,4 @@
-HDF5 version 1.13.1-1 currently under development
+HDF5 version 1.13.2-1 currently under development
================================================================================
@@ -36,7 +36,7 @@ CONTENTS
- New Features
- Support for new platforms and languages
-- Bug Fixes since HDF5-1.12.0
+- Bug Fixes since HDF5-1.13.1
- Platforms Tested
- Known Problems
- CMake vs. Autotools installations
@@ -47,815 +47,29 @@ New Features
Configuration:
-------------
- - CMake will now run the PowerShell script tests in test/ by default
- on Windows.
+ - HDF5 memory allocation sanity checking is now off by default for
+ Autotools debug builds
- The test directory includes several shell script tests that previously
- were not run by CMake on Windows. These are now run by default.
- If TEST_SHELL_SCRIPTS is ON and PWSH is found, the PowerShell scripts
- will execute. Similar to the bash scripts on unix platforms.
+ HDF5 can be configured to perform sanity checking on internal memory
+ allocations by adding heap canaries to these allocations. However,
+ enabling this option can cause issues with external filter plugins
+ when working with (reallocating/freeing/allocating and passing back)
+ buffers.
- (ADB - 2021/11/23)
+ Previously, this option was off by default for all CMake build types,
+ but only off by default for non-debug Autotools builds. Since debug
+ is the default build mode for HDF5 when built from source with
+ Autotools, this can result in surprising segfaults that don't occur
+ when an application is built against a release version of HDF5.
+ Therefore, this option is now off by default for all build types
+ across both CMake and Autotools.
- - Added new configure option to support building parallel tools.
- See Tools below (autotools - CMake):
- --enable-parallel-tools HDF5_BUILD_PARALLEL_TOOLS
-
- (RAW - 2021/10/25)
-
- - Added new configure options to enable dimension scales APIs (H5DS*) to
- use new object references with the native VOL connector (aka native HDF5
- library). New references are always used for non-native terminal VOL
- connectors (e.g., DAOS).
-
- Autotools --enable-dimension-scales-with-new-ref
- CMake HDF5_DIMENSION_SCALES_NEW_REF=ON
-
- (EIP - 2021/10/25, HDFFV-11180)
-
- - Refactored the utils folder.
-
- Added subfolder test and moved the 'swmr_check_compat_vfd.c file'
- from test into utils/test. Deleted the duplicate swmr_check_compat_vfd.c
- file in hl/tools/h5watch folder. Also fixed vfd check options.
-
- (ADB - 2021/10/18)
-
- - Changed autotools and CMake configurations to derive both
- compilation warnings-as-errors and warnings-only-warn configurations
- from the same files, 'config/*/*error*'. Removed redundant files
- 'config/*/*noerror*'.
-
- (DCY - 2021/09/29)
-
- - Added new option to control the build of High-Level tools
- that default ON/enabled.
-
- Add configure options (autotools - CMake):
- --enable-hltools HDF5_BUILD_HL_TOOLS
-
- Disabling this option prevents building the gif tool which
- contains the following CVEs:
- HDFFV-10592 CVE-2018-17433
- HDFFV-10593 CVE-2018-17436
- HDFFV-11048 CVE-2020-10809
-
- (ADB - 2021/09/16, HDFFV-11266)
-
- - Adds C++ Autotools configuration file for Intel
-
- * Checks for icpc as the compiler
- * Sets std=c++11
- * Copies most non-warning flags from intel-flags
-
- (DER - 2021/06/02)
-
- - Adds C++ Autotools configuration file for PGI
-
- * Checks for pgc++ as the compiler name (was: pgCC)
- * Sets -std=c++11
- * Other options basically match new C options (below)
-
- (DER - 2021/06/02)
-
- - Updates PGI C options
-
- * -Minform set to warn (was: inform) to suppress spurious messages
- * Sets -gopt -O2 as debug options
- * Sets -O4 as 'high optimization' option
- * Sets -O0 as 'no optimization' option
- * Removes specific settings for PGI 9 and 10
-
- (DER - 2021/06/02)
-
- - A C++11-compliant compiler is now required to build the C++ wrappers
-
- CMAKE_CXX_STANDARD is now set to 11 when building with CMake and
- -std=c++11 is added when building with clang/gcc via the Autotools.
-
- (DER - 2021/05/27)
-
- - CMake will now run the shell script tests in test/ by default
-
- The test directory includes several shell script tests that previously
- were not run by CMake. These are now run by default. TEST_SHELL_SCRIPTS
- has been set to ON and SH_PROGRAM has been set to bash (some test
- scripts use bash-isms). Platforms without bash (e.g., Windows) will
- ignore the script tests.
-
- (DER - 2021/05/23)
-
- - Removed unused HDF5_ENABLE_HSIZET option from CMake
-
- This has been unused for some time and has no effect.
-
- (DER - 2021/05/23)
-
- - CMake no longer builds the C++ library by default
-
- HDF5_BUILD_CPP_LIB now defaults to OFF, which is in line with the
- Autotools build defaults.
-
- (DER - 2021/04/20)
-
- - Removal of pre-VS2015 work-arounds
-
- HDF5 now requires Visual Studio 2015 or greater, so old work-around
- code and definitions have been removed, including:
-
- * <inttypes.h>
- * snprintf and vsnprintf
- * llround, llroundf, lround, lroundf, round, roundf
- * strtoll and strtoull
- * va_copy
- * struct timespec
-
- (DER - 2021/03/22)
-
- - Add CMake variable HDF5_LIB_INFIX
-
- This infix is added to all library names after 'hdf5'.
- e.g. the infix '_openmpi' results in the library name 'libhdf5_openmpi.so'
- This name is used in packages on debian based systems.
- (see https://packages.debian.org/jessie/amd64/libhdf5-openmpi-8/filelist)
-
- (barcode - 2021/03/22)
-
- - On macOS, Universal Binaries can now be built, allowing native execution on
- both Intel and Apple Silicon (ARM) based Macs.
-
- To do so, set CMAKE_OSX_ARCHITECTURES="x86_64;arm64"
-
- (SAM - 2021/02/07, github-311)
-
- - Added a configure-time option to control certain compiler warnings
- diagnostics
-
- A new configure-time option was added that allows some compiler warnings
- diagnostics to have the default operation. This is mainly intended for
- library developers and currently only works for gcc 10 and above. The
- diagnostics flags apply to C, C++ and Fortran compilers and will appear
- in "H5 C Flags", H5 C++ Flags" and H5 Fortran Flags, respectively. They
- will NOT be exported to h5cc, etc.
-
- The default is OFF, which will disable the warnings URL and color attributes
- for the warnings output. ON will not add the flags and allow default behavior.
-
- Autotools: --enable-diags
-
- CMake: HDF5_ENABLE_BUILD_DIAGS
-
- (ADB - 2021/02/05, HDFFV-11213)
-
- - CMake option to build the HDF filter plugins project as an external project
-
- The HDF filter plugins project is a collection of registered compression
- filters that can be dynamically loaded when needed to access data stored
- in a hdf5 file. This CMake-only option allows the plugins to be built and
- distributed with the hdf5 library and tools. Like the options for szip and
- zlib, either a tgz file or a git repository can be specified for the source.
-
- The option was refactored to use the CMake FetchContent process. This allows
- more control over the filter targets, but required external project command
- options to be moved to a CMake include file, HDF5PluginCache.cmake. Also
- enabled the filter examples to be used as tests for operation of the
- filter plugins.
-
- (ADB - 2020/12/10, OESS-98)
-
- - FreeBSD Autotools configuration now defaults to 'cc' and 'c++' compilers
-
- On FreeBSD, the autotools defaulted to 'gcc' as the C compiler and did
- not process C++ options. Since FreeBSD 10, the default compiler has
- been clang (via 'cc').
-
- The default compilers have been set to 'cc' for C and 'c++' for C++,
- which will pick up clang and clang++ respectively on FreeBSD 10+.
- Additionally, clang options are now set correctly for both C and C++
- and g++ options will now be set if that compiler is being used (an
- omission from the former functionality).
-
- (DER - 2020/11/28, HDFFV-11193)
-
- - Fixed POSIX problems when building w/ gcc on Solaris
-
- When building on Solaris using gcc, the POSIX symbols were not
- being set correctly, which could lead to issues like clock_gettime()
- not being found.
-
- The standard is now set to gnu99 when building with gcc on Solaris,
- which allows POSIX things to be #defined and linked correctly. This
- differs slightly from the gcc norm, where we set the standard to c99
- and manually set POSIX #define symbols.
-
- (DER - 2020/11/25, HDFFV-11191)
-
- - Added a configure-time option to consider certain compiler warnings
- as errors
-
- A new configure-time option was added that converts some compiler warnings
- to errors. This is mainly intended for library developers and currently
- only works for gcc and clang. The warnings that are considered errors
- will appear in the generated libhdf5.settings file. These warnings apply
- to C and C++ code and will appear in "H5 C Flags" and H5 C++ Flags",
- respectively. They will NOT be exported to h5cc, etc.
-
- The default is OFF. Building with this option may fail when compiling
- on operating systems and with compiler versions not commonly used by
- the library developers. Compilation may also fail when headers not
- under the control of the library developers (e.g., mpi.h, hdfs.h) raise
- warnings.
-
- Autotools: --enable-warnings-as-errors
-
- CMake: HDF5_ENABLE_WARNINGS_AS_ERRORS
-
- (DER - 2020/11/23, HDFFV-11189)
-
- - Autotools and CMake target added to produce doxygen generated documentation
-
- The default is OFF or disabled.
- Autoconf option is '--enable-doxygen'
- autotools make target is 'doxygen' and will build all doxygen targets
- CMake configure option is 'HDF5_BUILD_DOC'.
- CMake target is 'doxygen' for all available doxygen targets
- CMake target is 'hdf5lib_doc' for the src subdirectory
-
- (ADB - 2020/11/03)
-
- - CMake option to use MSVC naming conventions with MinGW
-
- HDF5_MSVC_NAMING_CONVENTION option enable to use MSVC naming conventions
- when using a MinGW toolchain
-
- (xan - 2020/10/30)
-
- - CMake option to statically link gcc libs with MinGW
-
- HDF5_MINGW_STATIC_GCC_LIBS allows to statically link libg/libstdc++
- with the MinGW toolchain
-
- (xan - 2020/10/30)
-
- - CMake option to build the HDF filter plugins project as an external project
-
- The HDF filter plugins project is a collection of registered compression
- filters that can be dynamically loaded when needed to access data stored
- in a hdf5 file. This CMake-only option allows the plugins to be built and
- distributed with the hdf5 library and tools. Like the options for szip and
- zlib, either a tgz file or a git repository can be specified for the source.
-
- The necessary options are (see the INSTALL_CMake.txt file):
- HDF5_ENABLE_PLUGIN_SUPPORT
- PLUGIN_TGZ_NAME or PLUGIN_GIT_URL
- There are more options necessary for various filters and the plugin project
- documents should be referenced.
-
- (ADB - 2020/09/27, OESS-98)
-
- - Added CMake option to format source files
-
- HDF5_ENABLE_FORMATTERS option will enable creation of targets using the
- pattern - HDF5_*_SRC_FORMAT - where * corresponds to the source folder
- or tool folder. All sources can be formatted by executing the format target;
- make format
-
- (ADB - 2020/08/24)
-
- - Add file locking configure and CMake options
-
- HDF5 1.10.0 introduced a file locking scheme, primarily to help
- enforce SWMR setup. Formerly, the only user-level control of the scheme
- was via the HDF5_USE_FILE_LOCKING environment variable.
-
- This change introduces configure-time options that control whether
- or not file locking will be used and whether or not the library
- ignores errors when locking has been disabled on the file system
- (useful on some HPC Lustre installations).
-
- In both the Autotools and CMake, the settings have the effect of changing
- the default property list settings (see the H5Pset/get_file_locking()
- entry, below).
-
- The yes/no/best-effort file locking configure setting has also been
- added to the libhdf5.settings file.
-
- Autotools:
-
- An --enable-file-locking=(yes|no|best-effort) option has been added.
-
- yes: Use file locking.
- no: Do not use file locking.
- best-effort: Use file locking and ignore "disabled" errors.
-
- CMake:
-
- Two self-explanatory options have been added:
-
- HDF5_USE_FILE_LOCKING
- HDF5_IGNORE_DISABLED_FILE_LOCKS
-
- Setting both of these to ON is the equivalent to the Autotools'
- best-effort setting.
-
- NOTE:
- The precedence order of the various file locking control mechanisms is:
-
- 1) HDF5_USE_FILE_LOCKING environment variable (highest)
-
- 2) H5Pset_file_locking()
-
- 3) configure/CMake options (which set the property list defaults)
-
- 4) library defaults (currently best-effort)
-
- (DER - 2020/07/30, HDFFV-11092)
-
- - CMake option to link the generated Fortran MOD files into the include
- directory.
-
- The Fortran generation of MOD files by a Fortran compile can produce
- different binary files between SHARED and STATIC compiles with different
- compilers and/or different platforms. Note that it has been found that
- different versions of Fortran compilers will produce incompatible MOD
- files. Currently, CMake will locate these MOD files in subfolders of
- the include directory and add that path to the Fortran library target
- in the CMake config file, which can be used by the CMake find library
- process. For other build systems using the binary from a CMake install,
- a new CMake configuration can be used to copy the pre-chosen version
- of the Fortran MOD files into the install include directory.
-
- The default will depend on the configuration of
- BUILD_STATIC_LIBS and BUILD_SHARED_LIBS:
- YES YES Default to SHARED
- YES NO Default to STATIC
- NO YES Default to SHARED
- NO NO Default to SHARED
- The defaults can be overridden by setting the config option
- HDF5_INSTALL_MOD_FORTRAN to one of NO, SHARED, or STATIC
-
- (ADB - 2020/07/09, HDFFV-11116)
-
- - CMake option to use AEC (open source SZip) library instead of SZip
-
- The open source AEC library is a replacement library for SZip. In
- order to use it for hdf5 the libaec CMake source was changed to add
- "-fPIC" and exclude test files. Autotools does not build the
- compression libraries within hdf5 builds. New option USE_LIBAEC is
- required to compensate for the different files produced by AEC build.
-
- (ADB - 2020/04/22, OESS-65)
-
- - CMake ConfigureChecks.cmake file now uses CHECK_STRUCT_HAS_MEMBER
-
- Some handcrafted tests in HDFTests.c has been removed and the CMake
- CHECK_STRUCT_HAS_MEMBER module has been used.
-
- (ADB - 2020/03/24, TRILAB-24)
-
- - Both build systems use same set of warnings flags
-
- GNU C, C++ and gfortran warnings flags were moved to files in a config
- sub-folder named gnu-warnings. Flags that only are available for a specific
- version of the compiler are in files named with that version.
- Clang C warnings flags were moved to files in a config sub-folder
- named clang-warnings.
- Intel C, Fortran warnings flags were moved to files in a config sub-folder
- named intel-warnings.
-
- There are flags in named "error-xxx" files with warnings that may
- be promoted to errors. Some source files may still need fixes.
-
- There are also pairs of files named "developer-xxx" and "no-developer-xxx"
- that are chosen by the CMake option:HDF5_ENABLE_DEV_WARNINGS or the
- configure option:--enable-developer-warnings.
-
- In addition, CMake no longer applies these warnings for examples.
-
- (ADB - 2020/03/24, TRILAB-192)
-
- - Added test script for file size compare
-
- If CMake minimum version is at least 3.14, the fileCompareTest.cmake
- script will compare file sizes.
-
- (ADB - 2020/02/24, HDFFV-11036)
-
- - Update CMake minimum version to 3.12
-
- Updated CMake minimum version to 3.12 and added version checks
- for Windows features.
-
- (ADB - 2020/02/05, TRILABS-142)
-
- - Fixed CMake include properties for Fortran libraries
-
- Corrected the library properties for Fortran to use the
- correct path for the Fortran module files.
-
- (ADB - 2020/02/04, HDFFV-11012)
-
- - Added common warnings files for gnu and intel
-
- Added warnings files to use one common set of flags
- during configure for both autotools and CMake build
- systems. The initial implementation only affects a
- general set of flags for gnu and intel compilers.
-
- (ADB - 2020/01/17)
-
- - Added new options to CMake for control of testing
-
- Added CMake options (default ON);
- HDF5_TEST_SERIAL AND/OR HDF5_TEST_PARALLEL
- combined with:
- HDF5_TEST_TOOLS
- HDF5_TEST_EXAMPLES
- HDF5_TEST_SWMR
- HDF5_TEST_FORTRAN
- HDF5_TEST_CPP
- HDF5_TEST_JAVA
-
- (ADB - 2020/01/15, HDFFV-11001)
-
- - Added Clang sanitizers to CMake for analyzer support if compiler is clang.
-
- Added CMake code and files to execute the Clang sanitizers if
- HDF5_ENABLE_SANITIZERS is enabled and the USE_SANITIZER option
- is set to one of the following:
- Address
- Memory
- MemoryWithOrigins
- Undefined
- Thread
- Leak
- 'Address;Undefined'
-
- (ADB - 2019/12/12, TRILAB-135)
-
- - Update CMake for VS2019 support
-
- CMake added support for VS2019 in version 3.15. Changes to the CMake
- generator setting required changes to scripts. Also updated version
- references in CMake files as necessary.
-
- (ADB - 2019/11/18, HDFFV-10962)
-
- - Update CMake options to match new autotools options
-
- Add configure options (autotools - CMake):
- enable-asserts HDF5_ENABLE_ASSERTS
- enable-symbols HDF5_ENABLE_SYMBOLS
- enable-profiling HDF5_ENABLE_PROFILING
- enable-optimization HDF5_ENABLE_OPTIMIZATION
- In addition NDEBUG is no longer forced defined and relies on the CMake
- process.
-
- (ADB - 2019/10/07, HDFFV-100901, HDFFV-10637, TRILAB-97)
+ (JTH - 2022/03/01)
Library:
--------
- - Adds new file driver-level memory copy operation for
- "ctl" callback and updates compact dataset I/O routines
- to utilize it
-
- When accessing an HDF5 file with a file driver that uses
- memory allocated in special ways (e.g., without standard
- library's `malloc`), a crash could be observed when HDF5
- tries to perform `memcpy` operations on such a memory
- region.
-
- These changes add a new H5FD_FEAT_MEMMANAGE VFD feature
- flag, which, if specified as supported by a VFD, will
- inform HDF5 that the VFD either uses special memory
- management routines or wishes to perform memory management
- in a specific way. Therefore, this flag instructs HDF5 to
- ask the file driver to perform memory management for
- certain operations.
-
- These changes also introduce a new "ctl" callback
- operation identified by the H5FD_CTL__MEM_COPY op code.
- This operation simply asks a VFD to perform a memory copy.
- The arguments to this operation are passed to the "ctl"
- callback's "input" parameter as a pointer to a struct
- defined as:
-
- struct H5FD_ctl_memcpy_args_t {
- void * dstbuf; /**< Destination buffer */
- hsize_t dst_off; /**< Offset within destination buffer */
- const void *srcbuf; /**< Source buffer */
- hsize_t src_off; /**< Offset within source buffer */
- size_t len; /**< Length of data to copy from source buffer */
- } H5FD_ctl_memcpy_args_t;
-
- Further, HDF5's compact dataset I/O routines were
- identified as a problematic area that could cause a crash
- for VFDs that make use of special memory management. Those
- I/O routines were therefore updated to make use of this new
- "ctl" callback operation in order to ask the underlying
- file driver to correctly handle memory copies.
-
- (JTH - 2021/09/28)
-
- - Adds new "ctl" callback to VFD H5FD_class_t structure
- with the following prototype:
-
- herr_t (*ctl)(H5FD_t *file, uint64_t op_code,
- uint64_t flags, const void *input,
- void **output);
-
- This newly-added "ctl" callback allows Virtual File
- Drivers to intercept and handle arbitrary operations
- identified by an operation code. Its parameters are
- as follows:
-
- `file` [in] - A pointer to the file to be operated on
- `op_code` [in] - The operation code identifying the
- operation to be performed
- `flags` [in] - Flags governing the behavior of the
- operation performed (see H5FDpublic.h
- for a list of valid flags)
- `input` [in] - A pointer to arguments passed to the
- VFD performing the operation
- `output` [out] - A pointer for the receiving VFD to
- use for output from the operation
-
- (JRM - 2021/08/16)
-
- - Change how the release part of version, in major.minor.release is checked
- for compatibility
-
- The HDF5 library uses a function, H5check_version, to check that
- the version defined in the header files, which is used to compile an
- application is compatible with the version codified in the library, which
- the application loads at runtime. This previously required an exact match
- or the library would print a warning, dump the build settings and then
- abort or continue. An environment variable controlled the logic.
-
- Now the function first checks that the library release version, in
- major.minor.release, is not older than the version in the headers.
- Secondly, if the release version is different, it checks if either
- the library version or the header version is in the exception list, in
- which case the release part of version, in major.minor.release, must
- be exact. An environment variable still controls the logic.
-
- (ADB - 2021/07/27)
-
- - gcc warning suppression macros were moved out of H5public.h
-
- The HDF5 library uses a set of macros to suppress warnings on gcc.
- These warnings were originally located in H5public.h so that the
- multi VFD (which only uses public headers) could also make use of them
- but internal macros should not be publicly exposed like this.
-
- These macros have now been moved to H5private.h. Pending future multi
- VFD refactoring, the macros have been duplicated in H5FDmulti.c to
- suppress the format string warnings there.
-
- (DER - 2021/06/03)
-
- - H5Gcreate1() now rejects size_hint parameters larger than UINT32_MAX
-
- The size_hint value is ultimately stored in a uint32_t struct field,
- so specifying a value larger than this on a 64-bit machine can cause
- undefined behavior including crashing the system.
-
- The documentation for this API call was also incorrect, stating that
- passing a negative value would cause the library to use a default
- value. Instead, passing a "negative" value actually passes a very large
- value, which is probably not what the user intends and can cause
- crashes on 64-bit systems.
-
- The Doxygen documentation has been updated and passing values larger
- than UINT32_MAX for size_hint will now produce a normal HDF5 error.
-
- (DER - 2021/04/29, HDFFV-11241)
-
-
- - H5Pset_fapl_log() no longer crashes when passed an invalid fapl ID
-
- When passed an invalid fapl ID, H5Pset_fapl_log() would usually
- segfault when attempting to free an uninitialized pointer in the error
- handling code. This behavior is more common in release builds or
- when the memory sanitization checks were not selected as a build
- option.
-
- The pointer is now correctly initialized and the API call now
- produces a normal HDF5 error when fed an invalid fapl ID.
-
- (DER - 2021/04/28, HDFFV-11240)
-
- - Fixes a segfault when H5Pset_mdc_log_options() is called multiple times
-
- The call incorrectly attempts to free an internal copy of the previous
- log location string, which causes a segfault. This only happens
- when the call is invoked multiple times on the same property list.
- On the first call to a given fapl, the log location is set to NULL so
- the segfault does not occur.
-
- The string is now handled properly and the segfault no longer occurs.
-
- (DER - 2021/04/27, HDFFV-11239)
-
- - HSYS_GOTO_ERROR now emits the results of GetLastError() on Windows
-
- HSYS_GOTO_ERROR is an internal macro that is used to produce error
- messages when system calls fail. These strings include errno and the
- the associated strerror() value, which are not particularly useful
- when a Win32 API call fails.
-
- On Windows, this macro has been updated to include the result of
- GetLastError(). When a system call fails on Windows, usually only
- one of errno and GetLastError() will be useful, however we emit both
- for the user to parse. The Windows error message is not emitted as
- it would be awkward to free the FormatMessage() buffer given the
- existing HDF5 error framework. Users will have to look up the error
- codes in MSDN.
-
- The format string on Windows has been changed from:
-
- "%s, errno = %d, error message = '%s'"
-
- to:
-
- "%s, errno = %d, error message = '%s', Win32 GetLastError() = %"PRIu32""
-
- for those inclined to parse it for error values.
-
- (DER - 2021/03/21)
-
- - File locking now works on Windows
-
- Since version 1.10.0, the HDF5 library has used a file locking scheme
- to help enforce one reader at a time accessing an HDF5 file, which can
- be helpful when setting up readers and writers to use the single-
- writer/multiple-readers (SWMR) access pattern.
-
- In the past, this was only functional on POSIX systems where flock() or
- fcntl() were present. Windows used a no-op stub that always succeeded.
-
- HDF5 now uses LockFileEx() and UnlockFileEx() to lock the file using the
- same scheme as POSIX systems. We lock the entire file when we set up the
- locks (by passing DWORDMAX as both size parameters to LockFileEx()).
-
- (DER - 2021/03/19, HDFFV-10191)
-
- - H5Epush_ret() now requires a trailing semicolon
-
- H5Epush_ret() is a function-like macro that has been changed to
- contain a `do {} while(0)` loop. Consequently, a trailing semicolon
- is now required to end the `while` statement. Previously, a trailing
- semi would work, but was not mandatory. This change was made to allow
- clang-format to correctly format the source code.
-
- (SAM - 2021/03/03)
-
- - Improved performance of H5Sget_select_elem_pointlist
-
- Modified library to cache the point after the last block of points
- retrieved by H5Sget_select_elem_pointlist, so a subsequent call to the
- same function to retrieve the next block of points from the list can
- proceed immediately without needing to iterate over the point list.
-
- (NAF - 2021/01/19)
-
- - Replaced H5E_ATOM with H5E_ID in H5Epubgen.h
-
- The term "atom" is archaic and not in line with current HDF5 library
- terminology, which uses "ID" instead. "Atom" has mostly been purged
- from the library internals and this change removes H5E_ATOM from
- the H5Epubgen.h (exposed via H5Epublic.h) and replaces it with
- H5E_ID.
-
- (DER - 2020/11/24, HDFFV-11190)
-
- - Add a new public function H5Ssel_iter_reset
-
- This function resets a dataspace selection iterator back to an
- initial state so that it may be used for iteration once more.
- This can be useful when needing to iterate over a selection
- multiple times without having to repeatedly create/destroy
- a selection iterator for that dataspace selection.
-
- (JTH - 2020/09/18)
-
- - Remove HDFS VFD stubs
-
- The original implementation of the HDFS VFD included non-functional
- versions of the following public API calls when the HDFS VFD is
- not built as a part of the HDF5 library:
-
- * H5FD_hdfs_init()
- * H5Pget_fapl_hdfs()
- * H5Pset_fapl_hdfs()
-
- They will remain present in HDF5 1.10 and HDF5 1.12 releases
- for binary compatibility purposes but have been removed as of 1.14.0.
-
- Note that this has nothing to do with the real HDFS VFD API calls
- that are fully functional when the HDFS VFD is configured and built.
-
- We simply changed:
-
- #ifdef LIBHDFS
- <real API call>
- #else
- <useless stub>
- #endif
-
- to:
-
- #ifdef LIBHDFS
- <real API call>
- #endif
-
- Which is how the other optional VFDs are handled.
-
- (DER - 2020/08/27)
-
- - Add Mirror VFD
-
- Use TCP/IP sockets to perform write-only (W/O) file I/O on a remote
- machine. Must be used in conjunction with the Splitter VFD.
-
- (JOS - 2020/03/13, TBD)
-
- - Add Splitter VFD
-
- Maintain separate R/W and W/O channels for "concurrent" file writes
- to two files using a single HDF5 file handle.
-
- (JOS - 2020/03/13, TBD)
-
- - Refactored public exposure of haddr_t type in favor of "object tokens"
-
- To better accommodate HDF5 VOL connectors where "object addresses in a file"
- may not make much sense, the following changes were made to the library:
-
- * Introduced new H5O_token_t "object token" type, which represents a
- unique and permanent identifier for referencing an HDF5 object within
- a container; these "object tokens" are meant to replace object addresses.
- Along with the new type, a new H5Oopen_by_token API call was introduced
- to open an object by a token, similar to how object addresses were
- previously used with H5Oopen_by_addr.
-
- * Introduced new H5Lget_info2, H5Lget_info_by_idx2, H5Literate2, H5Literate_by_name2,
- H5Lvisit2 and H5Lvisit_by_name2 API calls, along with their associated H5L_info2_t
- struct and H5L_iterate2_t callback function, which work with the newly-introduced
- object tokens, instead of object addresses. The original functions have been
- renamed to version 1 functions and are deprecated in favor of the new version 2
- functions. The H5L_info_t and H5L_iterate_t types have been renamed to version 1
- types and are now deprecated in favor of their version 2 counterparts. For each of
- the functions and types, compatibility macros take place of the original symbols.
-
- * Introduced new H5Oget_info3, H5Oget_info_by_name3, H5Oget_info_by_idx3,
- H5Ovisit3 and H5Ovisit_by_name3 API calls, along with their associated H5O_info2_t
- struct and H5O_iterate2_t callback function, which work with the newly-introduced
- object tokens, instead of object addresses. The version 2 functions are now
- deprecated in favor of the version 3 functions. The H5O_info_t and H5O_iterate_t
- types have been renamed to version 1 types and are now deprecated in favor of their
- version 2 counterparts. For each, compatibility macros take place of the original
- symbols.
-
- * Introduced new H5Oget_native_info, H5Oget_native_info_by_name and
- H5Oget_native_info_by_idx API calls, along with their associated H5O_native_info_t
- struct, which are used to retrieve the native HDF5 file format-specific information
- about an object. This information (such as object header info and B-tree/heap info)
- has been removed from the new H5O_info2_t struct so that the more generic
- H5Oget_info(_by_name/_by_idx)3 routines will not try to retrieve it for non-native
- VOL connectors.
-
- * Added new H5Otoken_cmp, H5Otoken_to_str and H5Otoken_from_str routines to compare
- two object tokens, convert an object token into a nicely-readable string format and
- to convert an object token string back into a real object token, respectively.
-
- (DER, QAK, JTH - 2020/01/16)
-
- - Add new public function H5Sselect_adjust.
-
- This function shifts a dataspace selection by a specified logical offset
- within the dataspace extent. This can be useful for VOL developers to
- implement chunked datasets.
-
- (NAF - 2019/11/18)
-
- - Add new public function H5Sselect_project_intersection.
-
- This function computes the intersection between two dataspace selections
- and projects that intersection into a third selection. This can be useful
- for VOL developers to implement chunked or virtual datasets.
-
- (NAF - 2019/11/13, ID-148)
-
- - Add new public function H5VLget_file_type.
-
- This function returns a datatype equivalent to the supplied datatype but
- with the location set to be in the file. This datatype can then be used
- with H5Tconvert to convert data between file and in-memory representation.
- This function is intended for use only by VOL connector developers.
-
- (NAF - 2019/11/08, ID-127)
+ -
Parallel Library:
@@ -865,651 +79,69 @@ New Features
Fortran Library:
----------------
- - H5Fget_name_f fixed to handle correctly trailing whitespaces and
- newly allocated buffers.
-
- (MSB - 2021/08/30, github-826,972)
-
- - Add wrappers for H5Pset/get_file_locking() API calls
-
- h5pget_file_locking_f()
- h5pset_file_locking_f()
-
- See the configure option discussion for HDFFV-11092 (above) for more
- information on the file locking feature and how it's controlled.
+ -
- (DER - 2020/07/30, HDFFV-11092)
C++ Library:
------------
- - Add wrappers for H5Pset/get_file_locking() API calls
-
- FileAccPropList::setFileLocking()
- FileAccPropList::getFileLocking()
-
- See the configure option discussion for HDFFV-11092 (above) for more
- information on the file locking feature and how it's controlled.
-
- (DER - 2020/07/30, HDFFV-11092)
+ -
Java Library:
-------------
- - Replaced HDF5AtomException with HDF5IdException
-
- Since H5E_ATOM changed to H5E_ID in the C library, the Java exception
- that wraps the error category was also renamed. Its functionality
- remains unchanged aside from the name.
-
- (See also the HDFFV-11190 note in the C library section)
-
- (DER - 2020/11/24, HDFFV-11190)
-
- - Added new H5S functions.
-
- H5Sselect_copy, H5Sselect_shape_same, H5Sselect_adjust,
- H5Sselect_intersect_block, H5Sselect_project_intersection,
- H5Scombine_hyperslab, H5Smodify_select, H5Scombine_select
- wrapper functions added.
-
- (ADB - 2020/10/27, HDFFV-10868)
-
- - Add wrappers for H5Pset/get_file_locking() API calls
-
- H5Pset_file_locking()
- H5Pget_use_file_locking()
- H5Pget_ignore_disabled_file_locking()
-
- Unlike the C++ and Fortran wrappers, there are separate getters for the
- two file locking settings, each of which returns a boolean value.
-
- See the configure option discussion for HDFFV-11092 (above) for more
- information on the file locking feature and how it's controlled.
-
- (DER - 2020/07/30, HDFFV-11092)
-
- - Added ability to test java library with VOLs.
-
- Created a new CMake script that combines the java and vol test scripts.
-
- (ADB - 2020/02/03, HDFFV-10996)
-
- - Tests fail for non-English locales.
-
- In the JUnit tests with a non-English locale, only the part before
- the decimal comma is replaced by XXXX and this leads to a comparison
- error. Changed the regex for the Time substitution.
-
- (ADB - 2020/01/09, HDFFV-10995)
+ -
Tools:
------
- - Added a new (unix ONLY) parallel meta tool 'h5dwalk', which utilizes the
- mpifileutils (https://hpc.github.io/mpifileutils) open source utility
- library to enable parallel execution of other HDF5 tools.
- This approach can greatly enhance the serial hdf5 tool performance over large
- collections of files by utilizing MPI parallelism to distribute an application
- load over many independent MPI ranks and files.
-
- An introduction to the mpifileutils library and initial 'User Guide' for
- the new 'h5dwalk" tool can be found at:
- https://github.com/HDFGroup/hdf5doc/tree/master/RFCs/HDF5/tools/parallel_tools
-
- (RAW - 2021/10/25)
-
- - Refactored the perform tools and removed depends on test library.
-
- Moved the perf and h5perf tools from tools/test/perform to
- tools/src/h5perf so that they can be installed. This required
- that the test library dependency be removed by copying the
- needed functions from h5test.c.
- The standalone scripts and other perform tools remain in the
- tools/test/perform folder.
-
- (ADB - 2021/08/10)
-
- - Removed partial long exceptions
-
- Some of the tools accepted shortened versions of the long options
- (ex: --datas instead of --dataset). These were implemented inconsistently,
- are difficult to maintian, and occasionally block useful long option
- names. These partial long options have been removed from all the tools.
-
- (DER - 2021/08/03)
-
- - h5repack added help text for user-defined filters.
-
- Added help text line that states the valid values of the filter flag
- for user-defined filters;
- filter_flag: 1 is OPTIONAL or 0 is MANDATORY
-
- (ADB - 2021/01/14, HDFFV-11099)
-
- - Added h5delete tool
-
- Deleting HDF5 storage when using the VOL can be tricky when the VOL
- does not create files. The h5delete tool is a simple wrapper around
- the H5Fdelete() API call that uses the VOL specified in the
- HDF5_VOL_CONNECTOR environment variable to delete a "file". If
- the call to H5Fdelete() fails, the tool will attempt to use
- the POSIX remove(3) call to remove the file.
-
- Note that the HDF5 library does currently have support for
- H5Fdelete() in the native VOL connector.
-
- (DER - 2020/12/16)
-
- - h5repack added options to control how external links are handled.
-
- Currently h5repack preserves external links and cannot copy and merge
- data from the external files. Two options, merge and prune, were added to
- control how to merge data from an external link into the resulting file.
- --merge Follow external soft link recursively and merge data.
- --prune Do not follow external soft links and remove link.
- --merge --prune Follow external link, merge data and remove dangling link.
-
- (ADB - 2020/08/05, HDFFV-9984)
-
- - h5repack was fixed to repack the reference attributes properly.
- The code line that checks if the update of reference inside a compound
- datatype is misplaced outside the code block loop that carries out the
- check. In consequence, the next attribute that is not the reference
- type was repacked again as the reference type and caused the failure of
- repacking. The fix is to move the corresponding code line to the correct
- code block.
-
- (KY -2020/02/07, HDFFV-11014)
-
- - h5diff was updated to use the new reference APIs.
-
- h5diff uses the new reference APIs to compare references.
- Attribute references can also be compared.
-
- (ADB - 2019/12/19, HDFFV-10980)
-
- - h5dump and h5ls were updated to use the new reference APIs.
-
- The tools library now use the new reference APIs to inspect a
- file. Also the DDL spec was updated to reflect the format
- changes produced with the new APIs. The export API and support
- functions in the JNI were updated to match.
-
- (ADB - 2019/12/06, HDFFV-10876 and HDFFV-10877)
+ -
High-Level APIs:
----------------
- - added set/get for unsigned long long attributes
-
- The attribute writing high-level API has been expanded to include
- public set/get functions for ULL attributes, analogously to the
- existing set/get for other types.
+ -
- (AF - 2021/09/08)
C Packet Table API:
-------------------
-
+
Internal header file:
---------------------
-
+
Documentation:
--------------
-
+
Support for new platforms, languages and compilers
==================================================
-
-Bug Fixes since HDF5-1.12.0 release
+
+Bug Fixes since HDF5-1.13.1 release
===================================
Library
-------
- - Fixed an H5Pget_filter_by_id1/2() assert w/ out of range filter IDs
-
- Both H5Pget_filter_by_id1 and 2 did not range check the filter ID, which
- could trip as assert in debug versions of the library. The library now
- returns a normal HDF5 error when the filter ID is out of range.
-
- (DER - 2021/11/23, HDFFV-11286)
-
- - Fixed an issue with collective metadata reads being permanently disabled
- after a dataset chunk lookup operation. This would usually cause a
- mismatched MPI_Bcast and MPI_ERR_TRUNCATE issue in the library for
- simple cases of H5Dcreate() -> H5Dwrite() -> H5Dcreate().
-
- (JTH - 2021/11/08, HDFFV-11090)
-
- - Fixed cross platform incompatibility of references within variable length
- types
-
- Reference types within variable length types previously could not be
- read on a platform with different endianness from where they were
- written. Fixed so cross platform portability is restored.
-
- (NAF - 2021/09/30)
-
- - Detection of simple data transform function "x"
-
- In the case of the simple data transform function "x" the (parallel)
- library recognizes this is the same as not applying this data transform
- function. This improves the I/O performance. In the case of the parallel
- library, it also avoids breaking to independent I/O, which makes it
- possible to apply a filter when writing or reading data to or from
- the HDF5 file.
-
- (JWSB - 2021/09/13)
-
- - Fixed an invalid read and memory leak when parsing corrupt file space
- info messages
-
- When the corrupt file from CVE-2020-10810 was parsed by the library,
- the code that imports the version 0 file space info object header
- message to the version 1 struct could read past the buffer read from
- the disk, causing an invalid memory read. Not catching this error would
- cause downstream errors that eventually resulted in a previously
- allocated buffer to be unfreed when the library shut down. In builds
- where the free lists are in use, this could result in an infinite loop
- and SIGABRT when the library shuts down.
-
- We now track the buffer size and raise an error on attempts to read
- past the end of it.
-
- (DER - 2021/08/12, HDFFV-11053)
-
-
- - Fixed CVE-2018-14460
-
- The tool h5repack produced a segfault when the rank in dataspace
- message was corrupted, causing invalid read while decoding the
- dimension sizes.
-
- The problem was fixed by ensuring that decoding the dimension sizes
- and max values will not go beyond the end of the buffer.
-
- (BMR - 2021/05/12, HDFFV-11223)
-
- - Fixed CVE-2018-11206
-
- The tool h5dump produced a segfault when the size of a fill value
- message was corrupted and caused a buffer overflow.
-
- The problem was fixed by verifying the fill value's size
- against the buffer size before attempting to access the buffer.
-
- (BMR - 2021/03/15, HDFFV-10480)
-
- - Fixed CVE-2018-14033 (same issue as CVE-2020-10811)
-
- The tool h5dump produced a segfault when the storage size message
- was corrupted and caused a buffer overflow.
-
- The problem was fixed by verifying the storage size against the
- buffer size before attempting to access the buffer.
-
- (BMR - 2021/03/15, HDFFV-11159/HDFFV-11049)
-
- - Remove underscores on header file guards
-
- Header file guards used a variety of underscores at the beginning of the define.
-
- Removed all leading (some trailing) underscores from header file guards.
-
- (ADB - 2021/03/03, #361)
-
- - Fixed a segmentation fault
-
- A segmentation fault occurred with a Mathworks corrupted file.
-
- A detection of accessing a null pointer was added to prevent the problem.
-
- (BMR - 2021/02/19, HDFFV-11150)
-
- - Fixed issue with MPI communicator and info object not being
- copied into new FAPL retrieved from H5F_get_access_plist
-
- Added logic to copy the MPI communicator and info object into
- the output FAPL. MPI communicator is retrieved from the VFD, while
- the MPI info object is retrieved from the file's original FAPL.
-
- (JTH - 2021/02/15, HDFFV-11109)
-
- - Fixed problems with vlens and refs inside compound using
- H5VLget_file_type()
-
- Modified library to properly ref count H5VL_object_t structs and only
- consider file vlen and reference types to be equal if their files are
- the same.
-
- (NAF - 2021/01/22)
-
- - Fixed CVE-2018-17432
-
- The tool h5repack produced a segfault on a corrupted file which had
- invalid rank for scalar or NULL datatype.
-
- The problem was fixed by modifying the dataspace encode and decode
- functions to detect and report invalid rank. h5repack now fails
- with an error message for the corrupted file.
-
- (BMR - 2020/10/26, HDFFV-10590)
-
- - Creation of dataset with optional filter
-
- When the combination of type, space, etc doesn't work for filter
- and the filter is optional, it was supposed to be skipped but it was
- not skipped and the creation failed.
-
- Allowed the creation of the dataset in such a situation.
-
- (BMR - 2020/08/13, HDFFV-10933)
-
- - Explicitly declared dlopen to use RTLD_LOCAL
-
- dlopen documentation states that if neither RTLD_GLOBAL nor
- RTLD_LOCAL are specified, then the default behavior is unspecified.
- The default on linux is usually RTLD_LOCAL while macos will default
- to RTLD_GLOBAL.
-
- (ADB - 2020/08/12, HDFFV-11127)
-
- - H5Sset_extent_none() sets the dataspace class to H5S_NO_CLASS which
- causes asserts/errors when passed to other dataspace API calls.
-
- H5S_NO_CLASS is an internal class value that should not have been
- exposed via a public API call.
-
- In debug builds of the library, this can cause assert() function to
- trip. In non-debug builds, it will produce normal library errors.
-
- The new library behavior is for H5Sset_extent_none() to convert
- the dataspace into one of type H5S_NULL, which is better handled
- by the library and easier for developers to reason about.
-
- (DER - 2020/07/27, HDFFV-11027)
-
- - Fixed issues CVE-2018-13870 and CVE-2018-13869
-
- When a buffer overflow occurred because a name length was corrupted
- and became very large, h5dump crashed on memory access violation.
-
- A check for reading pass the end of the buffer was added to multiple
- locations to prevent the crashes and h5dump now simply fails with an
- error message when this error condition occurs.
-
- (BMR - 2020/07/22, HDFFV-11120 and HDFFV-11121)
-
- - Fixed the segmentation fault when reading attributes with multiple threads
-
- It was reported that the reading of attributes with variable length string
- datatype will crash with segmentation fault particularly when the number of
- threads is high (>16 threads). The problem was due to the file pointer that
- was set in the variable length string datatype for the attribute. That file
- pointer was already closed when the attribute was accessed.
-
- The problem was fixed by setting the file pointer to the current opened file pointer
- when the attribute was accessed. Similar patch up was done before when reading
- dataset with variable length string datatype.
-
- (VC - 2020/07/13, HDFFV-11080)
-
- - Fixed CVE-2020-10810
-
- The tool h5clear produced a segfault during an error recovery in
- the superblock decoding. An internal pointer was reset to prevent
- further accessing when it is not assigned with a value.
-
- (BMR - 2020/06/29, HDFFV-11053)
-
- - Fixed CVE-2018-17435
-
- The tool h52gif produced a segfault when the size of an attribute
- message was corrupted and caused a buffer overflow.
-
- The problem was fixed by verifying the attribute message's size
- against the buffer size before accessing the buffer. h52gif was
- also fixed to display the failure instead of silently exiting
- after the segfault was eliminated.
-
- (BMR - 2020/06/19, HDFFV-10591)
+ -
Java Library
------------
- - JNI utility function does not handle new references.
-
- The JNI utility function for converting reference data to string did
- not use the new APIs. In addition to fixing that function, added new
- java tests for using the new APIs.
-
- (ADB - 2021/02/16, HDFFV-11212)
-
- - The H5FArray.java class, in which virtually the entire execution time
- is spent using the HDFNativeData method that converts from an array
- of bytes to an array of the destination Java type.
-
- 1. Convert the entire byte array into a 1-d array of the desired type,
- rather than performing 1 conversion per row;
- 2. Use the Java Arrays method copyOfRange to grab the section of the
- array from (1) that is desired to be inserted into the destination array.
-
- (PGT,ADB - 2020/12/13, HDFFV-10865)
-
- - Added ability to test java library with VOLs.
-
- Created a new CMake script that combines the java and vol test scripts.
-
- (ADB - 2020/02/03, HDFFV-10996)
-
- - Tests fail for non-English locales.
-
- In the JUnit tests with a non-English locale, only the part before
- the decimal comma is replaced by XXXX and this leads to a comparison
- error. Changed the regex for the Time substitution.
-
- (ADB - 2020/01/09, HDFFV-10995)
+ -
Configuration
-------------
- - Corrected path searched by CMake find_package command
-
- The install path for cmake find_package files had been changed to use
- "share/cmake"
- for all platforms. However the trailing "hdf5" directory was not removed.
- This "hdf5" additional directory has been removed.
-
- (ADB - 2021/09/27)
-
- - Corrected pkg-config compile script
-
- It was discovered that the position of the "$@" argument for the command
- in the compile script may fail on some platforms and configurations. The
- position of the "$@"command argument was moved before the pkg-config sub command.
-
- (ADB - 2021/08/30)
-
- - Fixed CMake C++ compiler flags
-
- A recent refactoring of the C++ configure files accidentally removed the
- file that executed the enable_language command for C++ needed by the
- HDFCXXCompilerFlags.cmake file. Also updated the intel warnings files,
- including adding support for windows platforms.
-
- (ADB - 2021/08/10)
-
- - Better support for libaec (open-source Szip library) in CMake
-
- Implemented better support for libaec 1.0.5 (or later) library. This version
- of libaec contains improvements for better integration with HDF5. Furthermore,
- the variable USE_LIBAEC_STATIC has been introduced to allow to make use of
- static version of libaec library. Use libaec_DIR or libaec_ROOT to set
- the location in which libaec can be found.
-
- Be aware, the Szip library of libaec 1.0.4 depends on another library within
- libaec library. This dependency is not specified in the current CMake
- configuration which means that one can not use the static Szip library of
- libaec 1.0.4 when building HDF5. This has been resolved in libaec 1.0.5.
-
- (JWSB - 2021/06/22)
-
- - Refactor CMake configure for Fortran
-
- The Fortran configure tests for KINDs reused a single output file that was
- read to form the Integer and Real Kinds defines. However, if config was run
- more then once, the CMake completed variable prevented the tests from executing
- again and the last value saved in the file was used to create the define.
- Creating separate files for each KIND solved the issue.
-
- In addition the test for H5_PAC_C_MAX_REAL_PRECISION was not pulling in
- defines for proper operation and did not define H5_PAC_C_MAX_REAL_PRECISION
- correctly for a zero value. This was fixed by supplying the required defines.
- In addition it was moved from the Fortran specific HDF5UseFortran.camke file
- to the C centric ConfigureChecks.cmake file.
-
- (ADB - 2021/06/03)
-
- - Move emscripten flag to compile flags
-
- The emscripten flag, -O0, was removed from target_link_libraries command
- to the correct target_compile_options command.
-
- (ADB - 2021/04/26 HDFFV-11083)
-
- - Remove arbitrary warning flag groups from CMake builds
-
- The arbitrary groups were created to reduce the quantity of warnings being
- reported that overwhelmed testing report systems. Considerable work has
- been accomplished to reduce the warning count and these arbitrary groups
- are no longer needed.
- Also the default for all warnings, HDF5_ENABLE_ALL_WARNINGS, is now ON.
-
- Visual Studio warnings C4100, C4706, and C4127 have been moved to
- developer warnings, HDF5_ENABLE_DEV_WARNINGS, and are disabled for normal builds.
-
- (ADB - 2021/03/22, HDFFV-11228)
-
- - Reclassify CMake messages, to allow new modes and --log-level option
-
- CMake message commands have a mode argument. By default, STATUS mode
- was chosen for any non-error message. CMake version 3.15 added additional
- modes, NOTICE, VERBOSE, DEBUG and TRACE. All message commands with a mode
- of STATUS were reviewed and most were reclassified as VERBOSE. The new
- mode was protected by a check for a CMake version of at least 3.15. If CMake
- version 3.17 or above is used, the user can use the command line option
- of "--log-level" to further restrict which message commands are displayed.
-
- (ADB - 2021/01/11, HDFFV-11144)
-
- - Fixes Autotools determination of the stat struct having an st_blocks field
-
- A missing parenthesis in an autoconf macro prevented building the test
- code used to determine if the stat struct contains the st_blocks field.
- Now that the test functions correctly, the H5_HAVE_STAT_ST_BLOCKS #define
- found in H5pubconf.h will be defined correctly on both the Autotools and
- CMake. This #define is only used in the tests and does not affect the
- HDF5 C library.
-
- (DER - 2021/01/07, HDFFV-11201)
-
- - Add missing ENV variable line to hdfoptions.cmake file
-
- Using the build options to use system SZIP/ZLIB libraries need to also
- specify the library root directory. Setting the {library}_ROOT ENV
- variable was added to the hdfoptions.cmake file.
-
- (ADB - 2020/10/19 HDFFV-11108)
+ -
Tools
-----
- - Changed how h5dump and h5ls identify long double.
-
- Long double support is not consistent across platforms. Tools will always
- identify long double as 128-bit [little/big]-endian float nn-bit precision.
- New test file created for datasets with attributes for float, double and
- long double. In addition any unknown integer or float datatype will now
- also show the number of bits for precision.
- These files are also used in the java tests.
-
- (ADB - 2021/03/24, HDFFV-11229,HDFFV-11113)
-
- - Fixed tools argument parsing.
-
- Tools parsing used the length of the option from the long array to match
- the option from the command line. This incorrectly matched a shorter long
- name option that happened to be a subset of another long option.
- Changed to match whole names.
-
- (ADB - 2021/01/19, HDFFV-11106)
-
- - The tools library was updated by standardizing the error stack process.
-
- General sequence is:
- h5tools_setprogname(PROGRAMNAME);
- h5tools_setstatus(EXIT_SUCCESS);
- h5tools_init();
- ... process the command-line (check for error-stack enable) ...
- h5tools_error_report();
- ... (do work) ...
- h5diff_exit(ret);
-
- (ADB - 2020/07/20, HDFFV-11066)
-
- - h5diff fixed a command line parsing error.
-
- h5diff would ignore the argument to -d (delta) if it is smaller than DBL_EPSILON.
- The macro H5_DBL_ABS_EQUAL was removed and a direct value comparison was used.
-
- (ADB - 2020/07/20, HDFFV-10897)
-
- - h5diff added a command line option to ignore attributes.
-
- h5diff would ignore all objects with a supplied path if the exclude-path argument is used.
- Adding the exclude-attribute argument will only exclude attributes, with the supplied path,
- from comparison.
-
- (ADB - 2020/07/20, HDFFV-5935)
-
- - h5diff added another level to the verbose argument to print filenames.
-
- Added verbose level 3 that is level 2 plus the filenames. The levels are:
- 0 : Identical to '-v' or '--verbose'
- 1 : All level 0 information plus one-line attribute status summary
- 2 : All level 1 information plus extended attribute status report
- 3 : All level 2 information plus file names
-
- (ADB - 2020/07/20, HDFFV-1005)
-
- - h5repack was fixed to repack the reference attributes properly.
-
- The code line that checks if the update of reference inside a compound
- datatype is misplaced outside the code block loop that carries out the
- check. In consequence, the next attribute that is not the reference
- type was repacked again as the reference type and caused the failure of
- repacking. The fix is to move the corresponding code line to the correct
- code block.
-
- (KY -2020/02/10, HDFFV-11014)
-
- - h5diff was updated to use the new reference APIs.
-
- h5diff uses the new reference APIs to compare references.
- Attribute references can also be compared.
-
- (ADB - 2019/12/19, HDFFV-10980)
-
- - h5dump and h5ls were updated to use the new reference APIs.
-
- The tools library now use the new reference APIs to inspect a
- file. Also the DDL spec was updated to reflect the format
- changes produced with the new APIs. The export API and support
- functions in the JNI were updated to match.
-
- (ADB - 2019/12/06, HDFFV-10876 and HDFFV-10877)
+ -
Performance
@@ -1519,18 +151,12 @@ Bug Fixes since HDF5-1.12.0 release
Fortran API
-----------
- - Corrected INTERFACE INTENT(IN) to INTENT(OUT) for buf_size in h5fget_file_image_f.
-
- (MSB - 2020/02/18, HDFFV-11029)
+ -
High-Level Library
------------------
- - Fixed HL_test_packet, test for packet table vlen of vlen.
-
- Incorrect length assignment.
-
- (ADB - 2021/10/14)
+ -
Fortran High-Level APIs
@@ -1550,23 +176,7 @@ Bug Fixes since HDF5-1.12.0 release
C++ APIs
--------
- - Added DataSet::operator=
-
- Some compilers complain if the copy constructor is given explicitly
- but the assignment operator is implicitly set to default.
-
- (2021/05/19)
-
-
- Testing
- -------
- - Stopped java/test/junit.sh.in installing libs for testing under ${prefix}
-
- Lib files needed are now copied to a subdirectory in the java/test
- directory, and on Macs the loader path for libhdf5.xxxs.so is changed
- in the temporary copy of libhdf5_java.dylib.
-
- (LRK, 2020/07/02, HDFFV-11063)
+ -
Platforms Tested
diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt
index a12a952..920705b 100644
--- a/release_docs/USING_CMake_Examples.txt
+++ b/release_docs/USING_CMake_Examples.txt
@@ -20,15 +20,14 @@ Notes: This short instruction is written for users who want to quickly
I. Preconditions
========================================================================
- 1. We suggest you obtain the latest CMake for windows from the Kitware
+ 1. We suggest you obtain the latest CMake for your platform from the Kitware
web site. The HDF5 1.13.x product requires a minimum CMake version
of 3.12. If you are using VS2019, the minimum version is 3.15.
2. You have installed the HDF5 library built with CMake, by executing
the HDF Install Utility (the *.msi file in the binary package for
- Windows or the *.sh on Linux). If you are using a Windows platform,
- you can obtain a pre-built Windows binary from The HDF Group's website
- at www.hdfgroup.org.
+ Windows or the *.sh on Linux). You can obtain pre-built binaries
+ from The HDF Group's website at www.hdfgroup.org.
diff --git a/release_docs/USING_HDF5_CMake.txt b/release_docs/USING_HDF5_CMake.txt
index 792c719..abe1906 100644
--- a/release_docs/USING_HDF5_CMake.txt
+++ b/release_docs/USING_HDF5_CMake.txt
@@ -29,27 +29,30 @@ Notes: This short instruction is written for users who want to quickly
of using a ctest script for building and testing. See
INSTALL_CMake.txt for more information.
+ 4. See https://cmake.org/cmake/help/latest/command/find_package.html
+ for more information on the CMake "Config Mode Search Procedure".
========================================================================
I. Preconditions
========================================================================
- 1. We suggest you obtain the latest CMake for windows from the Kitware
+ 1. We suggest you obtain the latest CMake for your platform from the Kitware
web site. The HDF5 1.13.x product requires a minimum CMake version
- of 3.12.
+ of 3.12. If you are using VS2019, the minimum version is 3.15.
2. You have installed the HDF5 library built with CMake, by executing
the HDF Install Utility (the *.msi file in the binary package for
- Windows). You can obtain pre-built binaries from The HDF Group's website at
- www.hdfgroup.org.
+ Windows or the *.sh on Linux). You can obtain pre-built binaries
+ from The HDF Group's website at www.hdfgroup.org.
- 3. Set the environment variable HDF5_DIR to the installed location of
- the config files for HDF5.
+ 3. Set the HDF5_ROOT CMake variable, -DHDF5_ROOT=<install_path>
+ or environment variable, set(ENV{HDF5_ROOT} "<install_path>")
+ to the installed location of HDF5.
On Windows:
- HDF5_DIR=C:/Program Files/HDF_Group/HDF5/1.13.x/cmake
+ HDF5_ROOT=C:/Program Files/HDF_Group/HDF5/1.13.x/
On unix:
- HDF5_DIR=<install root folder>/HDF_Group/HDF5/1.13.x/cmake
+ HDF5_ROOT=<install root folder>/HDF_Group/HDF5/1.13.x/
If you are using shared libraries, you may need to add to the path
environment variable. Set the path environment variable to the
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4ed5f67..b95409f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -377,14 +377,6 @@ set (H5HL_HDRS
IDE_GENERATED_PROPERTIES ("H5HL" "${H5HL_HDRS}" "${H5HL_SOURCES}" )
-set (H5HP_SOURCES
- ${HDF5_SRC_DIR}/H5HP.c
-)
-set (H5HP_HDRS
-)
-IDE_GENERATED_PROPERTIES ("H5HP" "${H5HP_HDRS}" "${H5HP_SOURCES}" )
-
-
set (H5I_SOURCES
${HDF5_SRC_DIR}/H5I.c
${HDF5_SRC_DIR}/H5Idbg.c
@@ -441,16 +433,6 @@ set (H5MM_HDRS
IDE_GENERATED_PROPERTIES ("H5MM" "${H5MM_HDRS}" "${H5MM_SOURCES}" )
-set (H5MP_SOURCES
- ${HDF5_SRC_DIR}/H5MP.c
- ${HDF5_SRC_DIR}/H5MPtest.c
-)
-
-set (H5MP_HDRS
-)
-IDE_GENERATED_PROPERTIES ("H5MP" "${H5MP_HDRS}" "${H5MP_SOURCES}" )
-
-
set (H5O_SOURCES
${HDF5_SRC_DIR}/H5O.c
${HDF5_SRC_DIR}/H5Oainfo.c
@@ -749,7 +731,6 @@ set (H5_MODULE_HEADERS
${HDF5_SRC_DIR}/H5Lmodule.h
${HDF5_SRC_DIR}/H5Mmodule.h
${HDF5_SRC_DIR}/H5MFmodule.h
- ${HDF5_SRC_DIR}/H5MPmodule.h
${HDF5_SRC_DIR}/H5Omodule.h
${HDF5_SRC_DIR}/H5Pmodule.h
${HDF5_SRC_DIR}/H5PBmodule.h
@@ -787,13 +768,11 @@ set (common_SRCS
${H5HF_SOURCES}
${H5HG_SOURCES}
${H5HL_SOURCES}
- ${H5HP_SOURCES}
${H5I_SOURCES}
${H5L_SOURCES}
${H5M_SOURCES}
${H5MF_SOURCES}
${H5MM_SOURCES}
- ${H5MP_SOURCES}
${H5O_SOURCES}
${H5P_SOURCES}
${H5PB_SOURCES}
@@ -836,7 +815,6 @@ set (H5_PUBLIC_HEADERS
${H5M_HDRS}
${H5MF_HDRS}
${H5MM_HDRS}
- ${H5MP_HDRS}
${H5O_HDRS}
${H5P_HDRS}
${H5PB_HDRS}
@@ -917,8 +895,6 @@ set (H5_PRIVATE_HEADERS
${HDF5_SRC_DIR}/H5HLpkg.h
${HDF5_SRC_DIR}/H5HLprivate.h
- ${HDF5_SRC_DIR}/H5HPprivate.h
-
${HDF5_SRC_DIR}/H5Ipkg.h
${HDF5_SRC_DIR}/H5Iprivate.h
@@ -933,9 +909,6 @@ set (H5_PRIVATE_HEADERS
${HDF5_SRC_DIR}/H5MMprivate.h
- ${HDF5_SRC_DIR}/H5MPpkg.h
- ${HDF5_SRC_DIR}/H5MPprivate.h
-
${HDF5_SRC_DIR}/H5Opkg.h
${HDF5_SRC_DIR}/H5Oprivate.h
${HDF5_SRC_DIR}/H5Oshared.h
@@ -1056,20 +1029,23 @@ if (LOCAL_BATCH_TEST)
endif ()
endif ()
+#### make the H5detect program
set (lib_prog_deps)
-if (NOT EXISTS "${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c")
- add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
- target_include_directories (H5detect PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
- target_compile_definitions(H5detect PUBLIC ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS})
- TARGET_C_PROPERTIES (H5detect STATIC)
- target_link_libraries (H5detect
- PRIVATE "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>" $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
- )
- target_compile_options(H5detect
- PRIVATE "$<$<PLATFORM_ID:Emscripten>:-O0>"
- )
- set (lib_prog_deps ${lib_prog_deps} H5detect)
+add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
+target_include_directories (H5detect PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
+target_compile_definitions(H5detect PUBLIC ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS})
+TARGET_C_PROPERTIES (H5detect STATIC)
+target_link_libraries (H5detect
+ PRIVATE "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>" $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
+)
+target_compile_options(H5detect
+ PRIVATE "$<$<PLATFORM_ID:Emscripten>:-O0>"
+)
+set (lib_prog_deps ${lib_prog_deps} H5detect)
+# check if a pregenerated H5Tinit.c file is present
+if (NOT EXISTS "${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c")
+ # execute the H5detect program
if (HDF5_BATCH_H5DETECT)
configure_file (
${HDF5_SOURCE_DIR}/bin/batch/${HDF5_BATCH_H5DETECT_SCRIPT}.in.cmake
@@ -1077,9 +1053,9 @@ if (NOT EXISTS "${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c")
)
add_custom_command (
OUTPUT gen_SRCS.stamp1
+ BYPRODUCTS H5Tinit.c
COMMAND ${HDF5_BATCH_CMD}
ARGS ${HDF5_BINARY_DIR}/${HDF5_BATCH_H5DETECT_SCRIPT}
- BYPRODUCTS H5Tinit.c gen_SRCS.stamp1
COMMAND ${CMAKE_COMMAND}
ARGS -E echo "Executed batch command to create H5Tinit.c"
COMMAND ${CMAKE_COMMAND}
@@ -1090,31 +1066,30 @@ if (NOT EXISTS "${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c")
add_custom_target (gen_H5Tinit
COMMAND ${CMAKE_COMMAND} -P ${HDF5_SOURCE_DIR}/config/cmake/wait_H5Tinit.cmake
)
- set_source_files_properties (${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c PROPERTIES GENERATED TRUE)
else ()
- add_custom_command (TARGET H5detect POST_BUILD
+ add_custom_command (
+ OUTPUT gen_SRCS.stamp1
+ BYPRODUCTS H5Tinit.c
COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:H5detect>
ARGS H5Tinit.c
- BYPRODUCTS H5Tinit.c gen_SRCS.stamp1
COMMAND ${CMAKE_COMMAND}
ARGS -E touch gen_SRCS.stamp1
DEPENDS H5detect
WORKING_DIRECTORY ${HDF5_GENERATED_SOURCE_DIR}
COMMENT "Create H5Tinit.c"
)
- set_source_files_properties (${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c PROPERTIES GENERATED TRUE)
if (BUILD_SHARED_LIBS)
- add_custom_command (TARGET H5detect POST_BUILD
+ add_custom_command (
+ OUTPUT shared/shared_gen_SRCS.stamp1
+ BYPRODUCTS shared/H5Tinit.c
COMMAND ${CMAKE_COMMAND}
ARGS -E copy_if_different H5Tinit.c shared/H5Tinit.c
- BYPRODUCTS shared/H5Tinit.c shared/shared_gen_SRCS.stamp1
COMMAND ${CMAKE_COMMAND}
ARGS -E touch shared/shared_gen_SRCS.stamp1
- DEPENDS H5detect H5Tinit.c
+ DEPENDS H5detect gen_SRCS.stamp1
WORKING_DIRECTORY ${HDF5_GENERATED_SOURCE_DIR}
COMMENT "Copy H5Tinit.c to shared folder"
)
- set_source_files_properties (${HDF5_GENERATED_SOURCE_DIR}/shared/H5Tinit.c PROPERTIES GENERATED TRUE)
endif ()
endif ()
else ()
@@ -1130,16 +1105,15 @@ else ()
if (BUILD_SHARED_LIBS)
add_custom_command (
OUTPUT shared/shared_gen_SRCS.stamp1
+ BYPRODUCTS shared/H5Tinit.c
COMMAND ${CMAKE_COMMAND}
ARGS -E copy_if_different H5Tinit.c shared/H5Tinit.c
- BYPRODUCTS shared/H5Tinit.c shared/shared_gen_SRCS.stamp1
COMMAND ${CMAKE_COMMAND}
ARGS -E touch shared/shared_gen_SRCS.stamp1
- DEPENDS H5Tinit.c
+ DEPENDS H5Tinit.c gen_SRCS.stamp1
WORKING_DIRECTORY ${HDF5_GENERATED_SOURCE_DIR}
COMMENT "Copy existing H5Tinit.c to shared folder"
)
- set_source_files_properties (${HDF5_GENERATED_SOURCE_DIR}/shared/H5Tinit.c PROPERTIES GENERATED TRUE)
endif ()
endif ()
@@ -1150,6 +1124,7 @@ if (HDF5_ENABLE_FORMATTERS)
clang_format (HDF5_SRC_DETECT_FORMAT ${HDF5_SRC_DIR}/H5detect.c)
endif ()
+# make the H5make_libsettings program
add_executable (H5make_libsettings ${HDF5_SRC_DIR}/H5make_libsettings.c)
target_include_directories (H5make_libsettings PRIVATE "${HDF5_SRC_DIR};${HDF5_SRC_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
target_compile_definitions(H5make_libsettings PUBLIC ${HDF_EXTRA_C_FLAGS} ${HDF_EXTRA_FLAGS})
@@ -1169,10 +1144,12 @@ if (HDF5_ENABLE_FORMATTERS)
clang_format (HDF5_SRC_LIBSETTINGS_FORMAT H5make_libsettings)
endif ()
-add_custom_command (TARGET H5make_libsettings POST_BUILD
+# execute the H5make_libsettings program
+add_custom_command (
+ OUTPUT gen_SRCS.stamp2
+ BYPRODUCTS H5lib_settings.c
COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:H5make_libsettings>
ARGS H5lib_settings.c
- BYPRODUCTS H5lib_settings.c gen_SRCS.stamp2
COMMAND ${CMAKE_COMMAND}
ARGS -E touch gen_SRCS.stamp2
DEPENDS H5make_libsettings
@@ -1181,17 +1158,17 @@ add_custom_command (TARGET H5make_libsettings POST_BUILD
)
set_source_files_properties (${HDF5_SRC_BINARY_DIR}/H5lib_settings.c PROPERTIES GENERATED TRUE)
if (BUILD_SHARED_LIBS)
- add_custom_command (TARGET H5make_libsettings POST_BUILD
+ add_custom_command (
+ OUTPUT shared/shared_gen_SRCS.stamp2
+ BYPRODUCTS shared/H5lib_settings.c
COMMAND ${CMAKE_COMMAND}
ARGS -E copy_if_different H5lib_settings.c shared/H5lib_settings.c
- BYPRODUCTS shared/H5lib_settings.c shared/shared_gen_SRCS.stamp2
COMMAND ${CMAKE_COMMAND}
ARGS -E touch shared/shared_gen_SRCS.stamp2
- DEPENDS H5make_libsettings H5lib_settings.c
+ DEPENDS H5make_libsettings gen_SRCS.stamp2
WORKING_DIRECTORY ${HDF5_SRC_BINARY_DIR}
COMMENT "Copy H5lib_settings.c to shared folder"
)
- set_source_files_properties (${HDF5_SRC_BINARY_DIR}/shared/H5lib_settings.c PROPERTIES GENERATED TRUE)
endif ()
## all_packages="AC,B,B2,D,F,FA,FL,FS,HL,I,O,S,ST,T,Z"
@@ -1204,7 +1181,7 @@ option (HDF5_ENABLE_DEBUG_APIS "Turn on extra debug output in all packages" OFF)
if (NOT ONLY_SHARED_LIBS)
set (gen_SRCS ${HDF5_GENERATED_SOURCE_DIR}/H5Tinit.c ${HDF5_SRC_BINARY_DIR}/H5lib_settings.c)
add_custom_target (gen_${HDF5_LIB_TARGET} ALL
- DEPENDS ${lib_prog_deps} ${gen_SRCS} ${HDF5_GENERATED_SOURCE_DIR}/gen_SRCS.stamp1 ${HDF5_SRC_BINARY_DIR}/gen_SRCS.stamp2
+ DEPENDS ${lib_prog_deps} ${HDF5_GENERATED_SOURCE_DIR}/gen_SRCS.stamp1 ${HDF5_SRC_BINARY_DIR}/gen_SRCS.stamp2
COMMENT "Generation target files"
)
@@ -1243,7 +1220,7 @@ endif ()
if (BUILD_SHARED_LIBS)
set (shared_gen_SRCS ${HDF5_GENERATED_SOURCE_DIR}/shared/H5Tinit.c ${HDF5_SRC_BINARY_DIR}/shared/H5lib_settings.c)
add_custom_target (gen_${HDF5_LIBSH_TARGET} ALL
- DEPENDS ${lib_prog_deps} ${shared_gen_SRCS} ${HDF5_GENERATED_SOURCE_DIR}/shared/shared_gen_SRCS.stamp1 ${HDF5_SRC_BINARY_DIR}/shared/shared_gen_SRCS.stamp2
+ DEPENDS ${lib_prog_deps} ${HDF5_GENERATED_SOURCE_DIR}/shared/shared_gen_SRCS.stamp1 ${HDF5_SRC_BINARY_DIR}/shared/shared_gen_SRCS.stamp2
COMMENT "Shared generation target files"
)
diff --git a/src/H5.c b/src/H5.c
index d4fca9a..7142234 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -70,9 +70,9 @@ static int H5__mpi_delete_cb(MPI_Comm comm, int keyval, void *attr_val, int *fla
/* Library Private Variables */
/*****************************/
-/* Library incompatible release versions */
-const unsigned VERS_RELEASE_EXCEPTIONS[] = {0};
-const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 0;
+/* Library incompatible release versions, develop releases are incompatible by design */
+const unsigned VERS_RELEASE_EXCEPTIONS[] = {0, 1, 2};
+const unsigned VERS_RELEASE_EXCEPTIONS_SIZE = 3;
/* statically initialize block for pthread_once call used in initializing */
/* the first global mutex */
@@ -83,6 +83,8 @@ hbool_t H5_libinit_g = FALSE; /* Library hasn't been initialized */
hbool_t H5_libterm_g = FALSE; /* Library isn't being shutdown */
#endif
+hbool_t H5_use_selection_io_g = FALSE;
+
#ifdef H5_HAVE_MPE
hbool_t H5_MPEinit_g = FALSE; /* MPE Library hasn't been initialized */
#endif
@@ -144,7 +146,8 @@ herr_t
H5_init_library(void)
{
size_t i;
- herr_t ret_value = SUCCEED;
+ char * env_use_select_io = NULL;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
@@ -288,6 +291,14 @@ H5_init_library(void)
}
/* clang-format on */
+ /* Check for HDF5_USE_SELECTION_IO env variable */
+ env_use_select_io = HDgetenv("HDF5_USE_SELECTION_IO");
+ if (NULL != env_use_select_io && HDstrcmp(env_use_select_io, "") && HDstrcmp(env_use_select_io, "0") &&
+ HDstrcmp(env_use_select_io, "no") && HDstrcmp(env_use_select_io, "No") &&
+ HDstrcmp(env_use_select_io, "NO") && HDstrcmp(env_use_select_io, "false") &&
+ HDstrcmp(env_use_select_io, "False") && HDstrcmp(env_use_select_io, "FALSE"))
+ H5_use_selection_io_g = TRUE;
+
/* Debugging? */
H5__debug_mask("-all");
H5__debug_mask(HDgetenv("HDF5_DEBUG"));
@@ -954,6 +965,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
static int checked = 0; /* If we've already checked the version info */
static unsigned int disable_version_check = 0; /* Set if the version check should be disabled */
static const char * version_mismatch_warning = VERSION_MISMATCH_WARNING;
+ static const char * release_mismatch_warning = RELEASE_MISMATCH_WARNING;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API_NOINIT_NOERR_NOFS
@@ -974,10 +986,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
}
/* H5_VERS_MAJOR and H5_VERS_MINOR must match */
- /* Cast relnum to int to avoid warning for unsigned < 0 comparison
- * in first release versions */
- if (H5_VERS_MAJOR != majnum || H5_VERS_MINOR != minnum || H5_VERS_RELEASE > (int)relnum) {
-
+ if (H5_VERS_MAJOR != majnum || H5_VERS_MINOR != minnum) {
switch (disable_version_check) {
case 0:
HDfprintf(stderr, "%s%s", version_mismatch_warning,
@@ -1012,9 +1021,10 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
break;
} /* end switch */
- } /* end if (H5_VERS_MAJOR != majnum || H5_VERS_MINOR != minnum || H5_VERS_RELEASE > relnum) */
+ } /* end if (H5_VERS_MAJOR != majnum || H5_VERS_MINOR != minnum) */
/* H5_VERS_RELEASE should be compatible, we will only add checks for exceptions */
+ /* Library develop release versions are incompatible by design */
if (H5_VERS_RELEASE != relnum) {
for (unsigned i = 0; i < VERS_RELEASE_EXCEPTIONS_SIZE; i++) {
/* Check for incompatible headers or incompatible library */
@@ -1022,7 +1032,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
switch (disable_version_check) {
case 0:
HDfprintf(
- stderr, "%s%s", version_mismatch_warning,
+ stderr, "%s%s", release_mismatch_warning,
"You can, at your own risk, disable this warning by setting the environment\n"
"variable 'HDF5_DISABLE_VERSION_CHECK' to a value of '1'.\n"
"Setting it to 2 or higher will suppress the warning messages totally.\n");
@@ -1041,7 +1051,7 @@ H5check_version(unsigned majnum, unsigned minnum, unsigned relnum)
"%s'HDF5_DISABLE_VERSION_CHECK' "
"environment variable is set to %d, application will\n"
"continue at your own risk.\n",
- version_mismatch_warning, disable_version_check);
+ release_mismatch_warning, disable_version_check);
/* Mention the versions we are referring to */
HDfprintf(stderr, "Headers are %u.%u.%u, library is %u.%u.%u\n", majnum, minnum,
relnum, (unsigned)H5_VERS_MAJOR, (unsigned)H5_VERS_MINOR,
diff --git a/src/H5AC.c b/src/H5AC.c
index 6fbc63e..ac28a8c 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -303,7 +303,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_co
aux_ptr->sync_point_done = NULL;
aux_ptr->p0_image_len = 0;
- HDsprintf(prefix, "%d:", mpi_rank);
+ HDsnprintf(prefix, sizeof(prefix), "%d:", mpi_rank);
if (mpi_rank == 0) {
if (NULL == (aux_ptr->d_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
@@ -1440,21 +1440,82 @@ H5AC_resize_entry(void *thing, size_t new_size)
cache_ptr = entry_ptr->cache_ptr;
HDassert(cache_ptr);
- /* Resize the entry */
- if (H5C_resize_entry(thing, new_size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
-
#ifdef H5_HAVE_PARALLEL
- {
+ /* Log the generation of dirty bytes of metadata iff:
+ *
+ * 1) The entry is clean on entry, and this resize will dirty it
+ * (i.e. the current and new sizes are different), and
+ *
+ * 2) This is a parallel computation -- which it is if the aux_ptr
+ * is non-null.
+ *
+ * A few points to note about this section of the code:
+ *
+ * 1) This call must occur before the call to H5C_resize_entry() since
+ * H5AC__log_dirtied_entry() expects the target entry to be clean
+ * on entry.
+ *
+ * 2) This code has some basic issues in terms of the number of bytes
+ * added to the dirty bytes count.
+ *
+ * First, it adds the initial entry size to aux_ptr->dirty_bytes,
+ * not the final size. Note that this code used to use the final
+ * size, but code to support this has been removed from
+ * H5AC__log_dirtied_entry() for reasons unknown since I wrote this
+ * code.
+ *
+ * As long as all ranks do the same thing here, this probably doesn't
+ * matter much, although it will delay initiation of sync points.
+ *
+ * A more interesting point is that this code will not increment
+ * aux_ptr->dirty_bytes if a dirty entry is resized. At first glance
+ * this seems major, as particularly with the older file formats,
+ * resizes can be quite large. However, this is probably not an
+ * issue either, since such resizes will be accompanied by large
+ * amounts of dirty metadata creation in other areas -- which will
+ * cause aux_ptr->dirty_bytes to be incremented.
+ *
+ * The bottom line is that this code is probably OK, but the above
+ * points should be kept in mind.
+ *
+ * One final observation: This comment is occasioned by a bug caused
+ * by moving the call to H5AC__log_dirtied_entry() after the call to
+ * H5C_resize_entry(), and then only calling H5AC__log_dirtied_entry()
+ * if entry_ptr->is_dirty was false.
+ *
+ * Since H5C_resize_entry() marks the target entry dirty unless there
+ * is no change in size, this had the effect of not calling
+ * H5AC__log_dirtied_entry() when it should be, and corrupting
+ * the cleaned and dirtied lists used by rank 0 in the parallel
+ * version of the metadata cache.
+ *
+ * The point here is that you should be very careful when working with
+ * this code, and not modify it unless you fully understand it.
+ *
+ * JRM -- 2/28/22
+ */
+
+ if ((!entry_ptr->is_dirty) && (entry_ptr->size != new_size)) {
+
+ /* the entry is clean, and will be marked dirty in the resize
+ * operation.
+ */
H5AC_aux_t *aux_ptr;
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
- if ((!entry_ptr->is_dirty) && (NULL != aux_ptr))
+
+ if (NULL != aux_ptr) {
+
if (H5AC__log_dirtied_entry(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "can't log dirtied entry")
+ }
}
#endif /* H5_HAVE_PARALLEL */
+ /* Resize the entry */
+ if (H5C_resize_entry(thing, new_size) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTRESIZE, FAIL, "can't resize entry")
+
done:
/* If currently logging, generate a message */
if (cache_ptr != NULL && cache_ptr->log_info != NULL)
@@ -1636,9 +1697,14 @@ H5AC_unprotect(H5F_t *f, const H5AC_class_t *type, haddr_t addr, void *thing, un
if (H5AC__log_dirtied_entry((H5AC_info_t *)thing) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "can't log dirtied entry")
- if (deleted && aux_ptr->mpi_rank == 0)
- if (H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed")
+ if (deleted && aux_ptr->mpi_rank == 0) {
+ if (H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0) {
+ /* If we fail to log the deleted entry, push an error but still
+ * participate in a possible sync point ahead
+ */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed")
+ }
+ }
} /* end if */
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 500a05a..7eaf751 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -304,8 +304,10 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr, had
* are used to receiving from process 0, and also load it
* into a buffer for transmission.
*/
- if (H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+ if (H5AC__copy_candidate_list_to_buffer(cache_ptr, &chk_num_entries, &haddr_buf_ptr) < 0) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate buffer.")
+ }
HDassert(chk_num_entries == num_entries);
HDassert(haddr_buf_ptr != NULL);
@@ -428,18 +430,23 @@ H5AC__broadcast_clean_list(H5AC_t *cache_ptr)
/* allocate a buffer to store the list of entry base addresses in */
buf_size = sizeof(haddr_t) * num_entries;
- if (NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
-
- /* Set up user data for callback */
- udata.aux_ptr = aux_ptr;
- udata.addr_buf_ptr = addr_buf_ptr;
- udata.u = 0;
-
- /* Free all the clean list entries, building the address list in the callback */
- /* (Callback also removes the matching entries from the dirtied list) */
- if (H5SL_free(aux_ptr->c_slist_ptr, H5AC__broadcast_clean_list_cb, &udata) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries")
+ if (NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size))) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
+ }
+ else {
+ /* Set up user data for callback */
+ udata.aux_ptr = aux_ptr;
+ udata.addr_buf_ptr = addr_buf_ptr;
+ udata.u = 0;
+
+ /* Free all the clean list entries, building the address list in the callback */
+ /* (Callback also removes the matching entries from the dirtied list) */
+ if (H5SL_free(aux_ptr->c_slist_ptr, H5AC__broadcast_clean_list_cb, &udata) < 0) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "Can't build address list for clean entries")
+ }
+ }
/* Now broadcast the list of cleaned entries */
if (MPI_SUCCESS !=
@@ -1448,8 +1455,10 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr, haddr_t *
/* allocate buffers to store the list of entry base addresses in */
buf_size = sizeof(haddr_t) * num_entries;
- if (NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
+ if (NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size))) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
+ }
/* Now receive the list of candidate entries */
if (MPI_SUCCESS !=
@@ -1800,10 +1809,14 @@ H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f)
if (evictions_enabled) {
/* construct candidate list -- process 0 only */
- if (aux_ptr->mpi_rank == 0)
+ if (aux_ptr->mpi_rank == 0) {
+ /* If constructing candidate list fails, push an error but still participate
+ * in collective operations during following candidate list propagation
+ */
if (H5AC__construct_candidate_list(cache_ptr, aux_ptr, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) <
0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't construct candidate list.")
+ }
/* propagate and apply candidate list -- all processes */
if (H5AC__propagate_and_apply_candidate_list(f) < 0)
@@ -1899,15 +1912,21 @@ H5AC__rsp__p0_only__flush(H5F_t *f)
aux_ptr->write_permitted = FALSE;
/* Check for error on the write operation */
- if (result < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
-
- /* this code exists primarily for the test bed -- it allows us to
- * enforce POSIX semantics on the server that pretends to be a
- * file system in our parallel tests.
- */
- if (aux_ptr->write_done)
- (aux_ptr->write_done)();
+ if (result < 0) {
+ /* If write operation fails, push an error but still participate
+ * in collective operations during following cache entry
+ * propagation
+ */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+ }
+ else {
+ /* this code exists primarily for the test bed -- it allows us to
+ * enforce POSIX semantics on the server that pretends to be a
+ * file system in our parallel tests.
+ */
+ if (aux_ptr->write_done)
+ (aux_ptr->write_done)();
+ }
} /* end if */
/* Propagate cleaned entries to other ranks. */
@@ -2019,15 +2038,21 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f)
aux_ptr->write_permitted = FALSE;
/* Check for error on the write operation */
- if (result < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
-
- /* this call exists primarily for the test code -- it is used
- * to enforce POSIX semantics on the process used to simulate
- * reads and writes in t_cache.c.
- */
- if (aux_ptr->write_done)
- (aux_ptr->write_done)();
+ if (result < 0) {
+ /* If write operation fails, push an error but still participate
+ * in collective operations during following cache entry
+ * propagation
+ */
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_to_min_clean() failed.")
+ }
+ else {
+ /* this call exists primarily for the test code -- it is used
+ * to enforce POSIX semantics on the process used to simulate
+ * reads and writes in t_cache.c.
+ */
+ if (aux_ptr->write_done)
+ (aux_ptr->write_done)();
+ }
} /* end if */
if (H5AC__propagate_flushed_and_still_clean_entries_list(f) < 0)
@@ -2093,11 +2118,11 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
(sync_point_op == H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED));
#if H5AC_DEBUG_DIRTY_BYTES_CREATION
- HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank,
+ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/m/mu) = %zu/%u/%zu/%u/%zu/%u\n", aux_ptr->mpi_rank,
aux_ptr->dirty_bytes_propagations, aux_ptr->unprotect_dirty_bytes,
aux_ptr->unprotect_dirty_bytes_updates, aux_ptr->insert_dirty_bytes,
- aux_ptr->insert_dirty_bytes_updates, aux_ptr->rename_dirty_bytes,
- aux_ptr->rename_dirty_bytes_updates);
+ aux_ptr->insert_dirty_bytes_updates, aux_ptr->move_dirty_bytes,
+ aux_ptr->move_dirty_bytes_updates);
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
/* clear collective access flag on half of the entries in the
@@ -2161,8 +2186,8 @@ H5AC__run_sync_point(H5F_t *f, int sync_point_op)
aux_ptr->unprotect_dirty_bytes_updates = 0;
aux_ptr->insert_dirty_bytes = 0;
aux_ptr->insert_dirty_bytes_updates = 0;
- aux_ptr->rename_dirty_bytes = 0;
- aux_ptr->rename_dirty_bytes_updates = 0;
+ aux_ptr->move_dirty_bytes = 0;
+ aux_ptr->move_dirty_bytes_updates = 0;
#endif /* H5AC_DEBUG_DIRTY_BYTES_CREATION */
done:
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index c853794..42bc090 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -28,10 +28,6 @@
#include "H5public.h"
#include "H5Cpublic.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/****************************************************************************
*
* structure H5AC_cache_config_t
@@ -783,7 +779,4 @@ typedef struct H5AC_cache_image_config_t {
//! <!-- [H5AC_cache_image_config_t_snip] -->
-#ifdef __cplusplus
-}
-#endif
#endif
diff --git a/src/H5B2internal.c b/src/H5B2internal.c
index c00f555..a8192df 100644
--- a/src/H5B2internal.c
+++ b/src/H5B2internal.c
@@ -17,7 +17,7 @@
* Dec 01 2016
* Quincey Koziol
*
- * Purpose: Routines for managing v2 B-tree internal ndoes.
+ * Purpose: Routines for managing v2 B-tree internal nodes.
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5B2leaf.c b/src/H5B2leaf.c
index 20ace84..f48cf5b 100644
--- a/src/H5B2leaf.c
+++ b/src/H5B2leaf.c
@@ -17,7 +17,7 @@
* Dec 01 2016
* Quincey Koziol
*
- * Purpose: Routines for managing v2 B-tree leaf ndoes.
+ * Purpose: Routines for managing v2 B-tree leaf nodes.
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5C.c b/src/H5C.c
index d34c650..fa46ff2 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -138,16 +138,6 @@ static herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr, size_t *len,
hbool_t actual);
-#if H5C_DO_SLIST_SANITY_CHECKS
-static hbool_t H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
-#endif /* H5C_DO_SLIST_SANITY_CHECKS */
-
-#if H5C_DO_EXTREME_SANITY_CHECKS
-static herr_t H5C__validate_lru_list(H5C_t *cache_ptr);
-static herr_t H5C__validate_pinned_entry_list(H5C_t *cache_ptr);
-static herr_t H5C__validate_protected_entry_list(H5C_t *cache_ptr);
-#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
-
#ifndef NDEBUG
static void H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t *entry,
const H5C_cache_entry_t *base_entry);
@@ -996,7 +986,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_lru_list(cache_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1031,7 +1021,7 @@ H5C_expunge_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, unsigned flag
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if (H5C__validate_lru_list(cache_ptr) < 0)
+ if (H5C_validate_lru_list(cache_ptr) < 0)
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1138,8 +1128,8 @@ H5C_flush_cache(H5F_t *f, unsigned flags)
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1314,8 +1304,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
#if H5C_DO_EXTREME_SANITY_CHECKS
/* no need to verify that entry is not already in the index as */
/* we already make that check below. */
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1424,6 +1414,7 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
entry_ptr->serialization_count = 0;
#endif /* NDEBUG */
+ /* initialize tag list fields */
entry_ptr->tl_next = NULL;
entry_ptr->tl_prev = NULL;
entry_ptr->tag_info = NULL;
@@ -1503,8 +1494,8 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1518,23 +1509,32 @@ H5C_insert_entry(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *thing, u
#ifdef H5_HAVE_PARALLEL
if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
- coll_access = H5CX_get_coll_metadata_read();
+ coll_access = H5F_get_coll_metadata_reads(f);
entry_ptr->coll_access = coll_access;
if (coll_access) {
H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, FAIL)
/* Make sure the size of the collective entries in the cache remain in check */
- if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
- } /* end if */
+ if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+ if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+ } /* end if */
+ } /* end if */
+ else {
+ if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
+ } /* end if */
+ } /* end else */
+ } /* end if */
#endif
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1858,8 +1858,8 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
HDassert(H5F_addr_ne(old_addr, new_addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -1964,8 +1964,8 @@ H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type, haddr_t old_addr, hadd
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2011,8 +2011,7 @@ H5C_resize_entry(void *thing, size_t new_size)
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2108,8 +2107,7 @@ H5C_resize_entry(void *thing, size_t new_size)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) || (H5C_validate_pinned_entry_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2149,8 +2147,8 @@ H5C_pin_protected_entry(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2164,8 +2162,8 @@ H5C_pin_protected_entry(void *thing)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2228,8 +2226,8 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
HDassert(H5F_addr_defined(addr));
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2248,7 +2246,7 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
#ifdef H5_HAVE_PARALLEL
if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI))
- coll_access = H5CX_get_coll_metadata_read();
+ coll_access = H5F_get_coll_metadata_reads(f);
#endif /* H5_HAVE_PARALLEL */
/* first check to see if the target is in cache */
@@ -2307,9 +2305,14 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
H5MM_memcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE,
H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
- if (0 == mpi_rank)
- if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
+ if (0 == mpi_rank) {
+ if (H5C__generate_image(f, cache_ptr, entry_ptr) < 0) {
+ /* If image generation fails, push an error but
+ * still participate in the following MPI_Bcast
+ */
+ HDONE_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
+ }
+ }
} /* end if */
HDassert(entry_ptr->image_ptr);
@@ -2595,16 +2598,24 @@ H5C_protect(H5F_t *f, const H5C_class_t *type, haddr_t addr, void *udata, unsign
#ifdef H5_HAVE_PARALLEL
/* Make sure the size of the collective entries in the cache remain in check */
- if (coll_access)
- if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
- if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
-#endif /* H5_HAVE_PARALLEL */
+ if (coll_access) {
+ if (H5P_USER_TRUE == H5F_COLL_MD_READ(f)) {
+ if (cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
+ } /* end if */
+ else {
+ if (cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100)
+ if (H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
+ } /* end else */
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3077,8 +3088,8 @@ H5C_unpin_entry(void *_entry_ptr)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3088,8 +3099,8 @@ H5C_unpin_entry(void *_entry_ptr)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3256,8 +3267,8 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
was_clean = !(entry_ptr->is_dirty);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -3523,8 +3534,8 @@ H5C_unprotect(H5F_t *f, haddr_t addr, void *thing, unsigned flags)
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -6058,8 +6069,8 @@ H5C__flush_ring(H5F_t *f, H5C_ring_t ring, unsigned flags)
HDassert(ring < H5C_RING_NTYPES);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -7182,8 +7193,20 @@ H5C__load_entry(H5F_t *f,
#ifdef H5_HAVE_PARALLEL
if (!coll_access || 0 == mpi_rank) {
#endif /* H5_HAVE_PARALLEL */
- if (H5F_block_read(f, type->mem_type, addr, len, image) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+
+ if (H5F_block_read(f, type->mem_type, addr, len, image) < 0) {
+
+#ifdef H5_HAVE_PARALLEL
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image, 0, len);
+ HDONE_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, NULL, "Can't read image*")
+ }
+
#ifdef H5_HAVE_PARALLEL
} /* end if */
/* if the collective metadata read optimization is turned on,
@@ -7230,8 +7253,19 @@ H5C__load_entry(H5F_t *f,
* loaded thing, go get the on-disk image again (the extra portion).
*/
if (H5F_block_read(f, type->mem_type, addr + len, actual_len - len, image + len) <
- 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ 0) {
+
+#ifdef H5_HAVE_PARALLEL
+ if (coll_access) {
+ /* Push an error, but still participate in following MPI_Bcast */
+ HDmemset(image + len, 0, actual_len - len);
+ HDONE_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
+ else
+#endif
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't read image")
+ }
+
#ifdef H5_HAVE_PARALLEL
}
/* If the collective metadata read optimization is turned on,
@@ -7383,6 +7417,7 @@ H5C__load_entry(H5F_t *f,
entry->serialization_count = 0;
#endif /* NDEBUG */
+ /* initialize tag list fields */
entry->tl_next = NULL;
entry->tl_prev = NULL;
entry->tag_info = NULL;
@@ -7711,7 +7746,7 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5C__validate_lru_list
+ * Function: H5C_validate_lru_list
*
* Purpose: Debugging function that scans the LRU list for errors.
*
@@ -7726,15 +7761,15 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
-static herr_t
-H5C__validate_lru_list(H5C_t *cache_ptr)
+herr_t
+H5C_validate_lru_list(H5C_t *cache_ptr)
{
int32_t len = 0;
size_t size = 0;
H5C_cache_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
@@ -7743,51 +7778,48 @@ H5C__validate_lru_list(H5C_t *cache_ptr)
(cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if (cache_ptr->LRU_list_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
if ((cache_ptr->LRU_list_len == 1) &&
((cache_ptr->LRU_head_ptr != cache_ptr->LRU_tail_ptr) || (cache_ptr->LRU_head_ptr == NULL) ||
(cache_ptr->LRU_head_ptr->size != cache_ptr->LRU_list_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
if ((cache_ptr->LRU_list_len >= 1) &&
((cache_ptr->LRU_head_ptr == NULL) || (cache_ptr->LRU_head_ptr->prev != NULL) ||
(cache_ptr->LRU_tail_ptr == NULL) || (cache_ptr->LRU_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
entry_ptr = cache_ptr->LRU_head_ptr;
while (entry_ptr != NULL) {
if ((entry_ptr != cache_ptr->LRU_head_ptr) &&
((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
if ((entry_ptr != cache_ptr->LRU_tail_ptr) &&
((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
if ((entry_ptr->is_pinned) || (entry_ptr->pinned_from_client) || (entry_ptr->pinned_from_cache))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
len++;
size += entry_ptr->size;
entry_ptr = entry_ptr->next;
}
- if ((cache_ptr->LRU_list_len != len) || (cache_ptr->LRU_list_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ if ((cache_ptr->LRU_list_len != (uint32_t)len) || (cache_ptr->LRU_list_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
done:
if (ret_value != SUCCEED)
HDassert(0);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_lru_list() */
+} /* H5C_validate_lru_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C__validate_pinned_entry_list
+ * Function: H5C_validate_pinned_entry_list
*
* Purpose: Debugging function that scans the pinned entry list for
* errors.
@@ -7803,15 +7835,15 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
-static herr_t
-H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
+herr_t
+H5C_validate_pinned_entry_list(H5C_t *cache_ptr)
{
int32_t len = 0;
size_t size = 0;
H5C_cache_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
@@ -7820,54 +7852,51 @@ H5C__validate_pinned_entry_list(H5C_t *cache_ptr)
(cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if (cache_ptr->pel_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
if ((cache_ptr->pel_len == 1) &&
((cache_ptr->pel_head_ptr != cache_ptr->pel_tail_ptr) || (cache_ptr->pel_head_ptr == NULL) ||
(cache_ptr->pel_head_ptr->size != cache_ptr->pel_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
if ((cache_ptr->pel_len >= 1) &&
((cache_ptr->pel_head_ptr == NULL) || (cache_ptr->pel_head_ptr->prev != NULL) ||
(cache_ptr->pel_tail_ptr == NULL) || (cache_ptr->pel_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
entry_ptr = cache_ptr->pel_head_ptr;
while (entry_ptr != NULL) {
if ((entry_ptr != cache_ptr->pel_head_ptr) &&
((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
if ((entry_ptr != cache_ptr->pel_tail_ptr) &&
((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
if (!entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
if (!(entry_ptr->pinned_from_client || entry_ptr->pinned_from_cache))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
len++;
size += entry_ptr->size;
entry_ptr = entry_ptr->next;
}
- if ((cache_ptr->pel_len != len) || (cache_ptr->pel_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ if ((cache_ptr->pel_len != (uint32_t)len) || (cache_ptr->pel_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
done:
if (ret_value != SUCCEED)
HDassert(0);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_pinned_entry_list() */
+} /* H5C_validate_pinned_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C__validate_protected_entry_list
+ * Function: H5C_validate_protected_entry_list
*
* Purpose: Debugging function that scans the protected entry list for
* errors.
@@ -7883,15 +7912,15 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_EXTREME_SANITY_CHECKS
-static herr_t
-H5C__validate_protected_entry_list(H5C_t *cache_ptr)
+herr_t
+H5C_validate_protected_entry_list(H5C_t *cache_ptr)
{
int32_t len = 0;
size_t size = 0;
H5C_cache_entry_t *entry_ptr = NULL;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_STATIC
+ FUNC_ENTER_NOAPI(FAIL)
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
@@ -7900,54 +7929,51 @@ H5C__validate_protected_entry_list(H5C_t *cache_ptr)
(cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 1 failed")
- if (cache_ptr->pl_len < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
-
if ((cache_ptr->pl_len == 1) &&
((cache_ptr->pl_head_ptr != cache_ptr->pl_tail_ptr) || (cache_ptr->pl_head_ptr == NULL) ||
(cache_ptr->pl_head_ptr->size != cache_ptr->pl_size)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 2 failed")
if ((cache_ptr->pl_len >= 1) &&
((cache_ptr->pl_head_ptr == NULL) || (cache_ptr->pl_head_ptr->prev != NULL) ||
(cache_ptr->pl_tail_ptr == NULL) || (cache_ptr->pl_tail_ptr->next != NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 3 failed")
entry_ptr = cache_ptr->pl_head_ptr;
while (entry_ptr != NULL) {
if ((entry_ptr != cache_ptr->pl_head_ptr) &&
((entry_ptr->prev == NULL) || (entry_ptr->prev->next != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 4 failed")
if ((entry_ptr != cache_ptr->pl_tail_ptr) &&
((entry_ptr->next == NULL) || (entry_ptr->next->prev != entry_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 5 failed")
if (!entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 6 failed")
if (entry_ptr->is_read_only && (entry_ptr->ro_ref_count <= 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 7 failed")
len++;
size += entry_ptr->size;
entry_ptr = entry_ptr->next;
}
- if ((cache_ptr->pl_len != len) || (cache_ptr->pl_size != size))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 9 failed")
+ if ((cache_ptr->pl_len != (uint32_t)len) || (cache_ptr->pl_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Check 8 failed")
done:
if (ret_value != SUCCEED)
HDassert(0);
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5C__validate_protected_entry_list() */
+} /* H5C_validate_protected_entry_list() */
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/*-------------------------------------------------------------------------
*
- * Function: H5C__entry_in_skip_list
+ * Function: H5C_entry_in_skip_list
*
* Purpose: Debugging function that scans skip list to see if it
* is in present. We need this, as it is possible for
@@ -7961,8 +7987,8 @@ done:
*-------------------------------------------------------------------------
*/
#if H5C_DO_SLIST_SANITY_CHECKS
-static hbool_t
-H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
+hbool_t
+H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
{
H5SL_node_t *node_ptr;
hbool_t in_slist;
@@ -7990,7 +8016,7 @@ H5C__entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr)
}
return (in_slist);
-} /* H5C__entry_in_skip_list() */
+} /* H5C_entry_in_skip_list() */
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
/*-------------------------------------------------------------------------
@@ -8480,8 +8506,8 @@ H5C__serialize_cache(H5F_t *f)
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ((H5C__validate_protected_entry_list(cache_ptr) < 0) ||
- (H5C__validate_pinned_entry_list(cache_ptr) < 0) || (H5C__validate_lru_list(cache_ptr) < 0))
+ if ((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) || (H5C_validate_lru_list(cache_ptr) < 0))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
diff --git a/src/H5CSprivate.h b/src/H5CSprivate.h
index a238ec7..2dc28f5 100644
--- a/src/H5CSprivate.h
+++ b/src/H5CSprivate.h
@@ -17,10 +17,6 @@
#ifndef H5CSprivate_H
#define H5CSprivate_H
-#ifdef NOT_YET
-#include "H5CSpublic.h"
-#endif /* NOT_YET */
-
/* Private headers needed by this file */
#include "H5private.h"
diff --git a/src/H5CX.c b/src/H5CX.c
index b9ce682..c5bb8e4 100644
--- a/src/H5CX.c
+++ b/src/H5CX.c
@@ -1397,9 +1397,7 @@ H5CX_set_apl(hid_t *acspl_id, const H5P_libclass_t *libclass,
/* If parallel is enabled and the file driver used is the MPI-IO
* VFD, issue an MPI barrier for easier debugging if the API function
- * calling this is supposed to be called collectively. Note that this
- * happens only when the environment variable H5_COLL_BARRIER is set
- * to non 0.
+ * calling this is supposed to be called collectively.
*/
if (H5_coll_api_sanity_check_g) {
MPI_Comm mpi_comm; /* File communicator */
@@ -1425,7 +1423,7 @@ done:
* Purpose: Sanity checks and sets up collective operations.
*
* Note: Should be called for all API routines that modify file
- * file metadata but don't pass in an access property list.
+ * metadata but don't pass in an access property list.
*
* Return: Non-negative on success / Negative on failure
*
@@ -1456,9 +1454,7 @@ H5CX_set_loc(hid_t
/* If parallel is enabled and the file driver used is the MPI-IO
* VFD, issue an MPI barrier for easier debugging if the API function
- * calling this is supposed to be called collectively. Note that this
- * happens only when the environment variable H5_COLL_BARRIER is set
- * to non 0.
+ * calling this is supposed to be called collectively.
*/
if (H5_coll_api_sanity_check_g) {
MPI_Comm mpi_comm; /* File communicator */
diff --git a/src/H5CXprivate.h b/src/H5CXprivate.h
index 8ec1c59..878bcf6 100644
--- a/src/H5CXprivate.h
+++ b/src/H5CXprivate.h
@@ -16,11 +16,6 @@
#ifndef H5CXprivate_H
#define H5CXprivate_H
-/* Include package's public header */
-#ifdef NOT_YET
-#include "H5CXpublic.h"
-#endif /* NOT_YET */
-
/* Private headers needed by this file */
#include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */
diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c
index 3434fed..8655881 100644
--- a/src/H5Cepoch.c
+++ b/src/H5Cepoch.c
@@ -78,22 +78,21 @@ static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED *thing,
/* Local Variables */
/*******************/
-const H5AC_class_t H5AC_EPOCH_MARKER[1] = {{
- /* id = */ H5AC_EPOCH_MARKER_ID,
- /* name = */ "epoch marker",
- /* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
- /* flags = */ H5AC__CLASS_NO_FLAGS_SET,
- /* get_initial_load_size = */ H5C__epoch_marker_get_initial_load_size,
- /* get_final_load_size = */ H5C__epoch_marker_get_final_load_size,
- /* verify_chksum = */ H5C__epoch_marker_verify_chksum,
- /* deserialize = */ H5C__epoch_marker_deserialize,
- /* image_len = */ H5C__epoch_marker_image_len,
- /* pre_serialize = */ H5C__epoch_marker_pre_serialize,
- /* serialize = */ H5C__epoch_marker_serialize,
- /* notify = */ H5C__epoch_marker_notify,
- /* free_icr = */ H5C__epoch_marker_free_icr,
- /* fsf_size = */ H5C__epoch_marker_fsf_size,
-}};
+const H5AC_class_t H5AC_EPOCH_MARKER[1] = {
+ {/* id = */ H5AC_EPOCH_MARKER_ID,
+ /* name = */ "epoch marker",
+ /* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
+ /* flags = */ H5AC__CLASS_NO_FLAGS_SET,
+ /* get_initial_load_size = */ H5C__epoch_marker_get_initial_load_size,
+ /* get_final_load_size = */ H5C__epoch_marker_get_final_load_size,
+ /* verify_chksum = */ H5C__epoch_marker_verify_chksum,
+ /* deserialize = */ H5C__epoch_marker_deserialize,
+ /* image_len = */ H5C__epoch_marker_image_len,
+ /* pre_serialize = */ H5C__epoch_marker_pre_serialize,
+ /* serialize = */ H5C__epoch_marker_serialize,
+ /* notify = */ H5C__epoch_marker_notify,
+ /* free_icr = */ H5C__epoch_marker_free_icr,
+ /* fsf_size = */ H5C__epoch_marker_fsf_size}};
/***************************************************************************
* Class functions for H5C__EPOCH_MAKER_TYPE:
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
index 7421c90..98c1291 100644
--- a/src/H5Cimage.c
+++ b/src/H5Cimage.c
@@ -997,6 +997,9 @@ H5C__read_cache_image(H5F_t *f, H5C_t *cache_ptr)
#endif /* H5_HAVE_PARALLEL */
/* Read the buffer (if serial access, or rank 0 of parallel access) */
+ /* NOTE: if this block read is being performed on rank 0 only, throwing
+ * an error here will cause other ranks to hang in the following MPI_Bcast.
+ */
if (H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len,
cache_ptr->image_buffer) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block")
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index 30b86b9..61c3afc 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -1011,7 +1011,7 @@ if ( ( (cache_ptr) == NULL ) || \
( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
+ (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
@@ -1034,7 +1034,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
+ (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \
@@ -1071,7 +1071,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev != NULL ) ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
+ (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
@@ -1102,7 +1102,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev != NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
- (cache_ptr)->dirty_index_size) ) || \
+ (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
@@ -1161,7 +1161,7 @@ if ( ( (cache_ptr) == NULL ) || \
}
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
+ entry_ptr, was_clean) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len <= 0 ) || \
( (cache_ptr)->index_size <= 0 ) || \
@@ -1175,9 +1175,9 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !( was_clean ) || \
- ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
- ( ( (was_clean) ) || \
- ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
+ ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
+ ( ( (was_clean) ) || \
+ ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
( (entry_ptr) == NULL ) || \
( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
@@ -1196,20 +1196,20 @@ if ( ( (cache_ptr) == NULL ) || \
}
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr) \
+ entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_len <= 0 ) || \
( (cache_ptr)->index_size <= 0 ) || \
( (new_size) > (cache_ptr)->index_size ) || \
( (cache_ptr)->index_size != \
- ((cache_ptr)->clean_index_size + \
+ ((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !((entry_ptr)->is_dirty ) || \
- ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
- ( ( ((entry_ptr)->is_dirty) ) || \
- ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
+ ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
+ ( ( ((entry_ptr)->is_dirty) ) || \
+ ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
( ( (cache_ptr)->index_len == 1 ) && \
( (cache_ptr)->index_size != (new_size) ) ) || \
( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
@@ -1465,10 +1465,10 @@ if ( ( (cache_ptr)->index_size != \
H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
(cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
- -= (entry_ptr)->size; \
+ -= (entry_ptr)->size; \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
- += (entry_ptr)->size; \
+ += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
}
@@ -1477,18 +1477,18 @@ if ( ( (cache_ptr)->index_size != \
H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
- -= (entry_ptr)->size; \
+ -= (entry_ptr)->size; \
(cache_ptr)->dirty_index_size += (entry_ptr)->size; \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
- += (entry_ptr)->size; \
+ += (entry_ptr)->size; \
H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
}
#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
+ entry_ptr, was_clean) \
{ \
H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
- entry_ptr, was_clean) \
+ entry_ptr, was_clean) \
(cache_ptr)->index_size -= (old_size); \
(cache_ptr)->index_size += (new_size); \
((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size); \
@@ -1497,14 +1497,14 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->clean_index_size -= (old_size); \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \
} else { \
- (cache_ptr)->dirty_index_size -= (old_size); \
+ (cache_ptr)->dirty_index_size -= (old_size); \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \
} \
if((entry_ptr)->is_dirty) { \
(cache_ptr)->dirty_index_size += (new_size); \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \
} else { \
- (cache_ptr)->clean_index_size += (new_size); \
+ (cache_ptr)->clean_index_size += (new_size); \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \
} \
H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \
@@ -1791,7 +1791,7 @@ if ( ( (cache_ptr)->index_size != \
} else { /* slist disabled */ \
\
HDassert( (cache_ptr)->slist_len == 0 ); \
- HDassert( (cache_ptr)->slist_size == 0 ); \
+ HDassert( (cache_ptr)->slist_size == 0 ); \
} \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
@@ -2033,16 +2033,16 @@ if ( ( (cache_ptr)->index_size != \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the head.\
- */ \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* Use the dirty flag to infer whether the entry is on the clean or \
@@ -2096,16 +2096,16 @@ if ( ( (cache_ptr)->index_size != \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the head \
- */ \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2288,28 +2288,28 @@ if ( ( (cache_ptr)->index_size != \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the \
- * head. \
- */ \
+ * head. \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* since the entry is being flushed or cleared, one would think \
- * that it must be dirty -- but that need not be the case. Use the \
- * dirty flag to infer whether the entry is on the clean or dirty \
- * LRU list, and remove it. Then insert it at the head of the \
- * clean LRU list. \
+ * that it must be dirty -- but that need not be the case. Use the \
+ * dirty flag to infer whether the entry is on the clean or dirty \
+ * LRU list, and remove it. Then insert it at the head of the \
+ * clean LRU list. \
* \
* The function presumes that a dirty entry will be either cleared \
- * or flushed shortly, so it is OK if we put a dirty entry on the \
- * clean LRU list. \
+ * or flushed shortly, so it is OK if we put a dirty entry on the \
+ * clean LRU list. \
*/ \
\
if ( (entry_ptr)->is_dirty ) { \
@@ -2350,17 +2350,17 @@ if ( ( (cache_ptr)->index_size != \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the \
- * head. \
- */ \
+ * head. \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2424,7 +2424,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* insert the entry at the tail of the clean or dirty LRU list as \
@@ -2465,7 +2465,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- \
+ \
} else { \
\
/* modified LRU specific code */ \
@@ -2474,7 +2474,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2558,7 +2558,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* insert the entry at the head of the clean or dirty LRU list as \
@@ -2599,7 +2599,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- \
+ \
} else { \
\
/* modified LRU specific code */ \
@@ -2608,7 +2608,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2677,12 +2677,12 @@ if ( ( (cache_ptr)->index_size != \
HDassert( !((entry_ptr)->is_read_only) ); \
HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
HDassert( (entry_ptr)->size > 0 ); \
- \
+ \
if ( (entry_ptr)->is_pinned ) { \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
\
} else { \
@@ -2693,7 +2693,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* Similarly, remove the entry from the clean or dirty LRU list \
@@ -2739,12 +2739,12 @@ if ( ( (cache_ptr)->index_size != \
HDassert( !((entry_ptr)->is_read_only) ); \
HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
HDassert( (entry_ptr)->size > 0 ); \
- \
+ \
if ( (entry_ptr)->is_pinned ) { \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
- (cache_ptr)->pel_tail_ptr, \
- (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
\
} else { \
@@ -2755,7 +2755,7 @@ if ( ( (cache_ptr)->index_size != \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2804,21 +2804,21 @@ if ( ( (cache_ptr)->index_size != \
HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
HDassert( (entry_ptr)->size > 0 ); \
\
- if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \
- \
+ if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) {\
+ \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the head. \
- */ \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* remove the entry from either the clean or dirty LUR list as \
@@ -2827,7 +2827,7 @@ if ( ( (cache_ptr)->index_size != \
if ( was_dirty ) { \
\
H5C__AUX_DLL_REMOVE((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_head_ptr, \
(cache_ptr)->dLRU_tail_ptr, \
(cache_ptr)->dLRU_list_len, \
(cache_ptr)->dLRU_list_size, \
@@ -2836,34 +2836,34 @@ if ( ( (cache_ptr)->index_size != \
} else { \
\
H5C__AUX_DLL_REMOVE((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_head_ptr, \
(cache_ptr)->cLRU_tail_ptr, \
(cache_ptr)->cLRU_list_len, \
(cache_ptr)->cLRU_list_size, \
- (fail_val)) \
+ (fail_val)) \
} \
\
/* insert the entry at the head of either the clean or dirty \
- * LRU list as appropriate. \
+ * LRU list as appropriate. \
*/ \
\
if ( (entry_ptr)->is_dirty ) { \
\
H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_head_ptr, \
(cache_ptr)->dLRU_tail_ptr, \
(cache_ptr)->dLRU_list_len, \
(cache_ptr)->dLRU_list_size, \
- (fail_val)) \
+ (fail_val)) \
\
} else { \
\
H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_head_ptr, \
(cache_ptr)->cLRU_tail_ptr, \
(cache_ptr)->cLRU_list_len, \
(cache_ptr)->cLRU_list_size, \
- (fail_val)) \
+ (fail_val)) \
} \
\
/* End modified LRU specific code. */ \
@@ -2872,7 +2872,7 @@ if ( ( (cache_ptr)->index_size != \
#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
+#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
HDassert( (cache_ptr) ); \
HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
@@ -2881,21 +2881,21 @@ if ( ( (cache_ptr)->index_size != \
HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
HDassert( (entry_ptr)->size > 0 ); \
\
- if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) { \
- \
+ if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr->is_protected ) ) ) {\
+ \
/* modified LRU specific code */ \
\
/* remove the entry from the LRU list, and re-insert it at the head. \
- */ \
+ */ \
\
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
(cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_len, \
(cache_ptr)->LRU_list_size, (fail_val)) \
\
/* End modified LRU specific code. */ \
@@ -2952,49 +2952,49 @@ if ( ( (cache_ptr)->index_size != \
\
if ( (entry_ptr)->coll_access ) { \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \
- (cache_ptr)->coll_list_size, \
- (entry_ptr)->size, \
- (new_size)); \
- \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \
+ (cache_ptr)->coll_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
} \
\
if ( (entry_ptr)->is_pinned ) { \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, \
- (entry_ptr)->size, \
- (new_size)); \
- \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
+ \
} else { \
\
/* modified LRU specific code */ \
\
- /* Update the size of the LRU list */ \
+ /* Update the size of the LRU list */ \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, \
- (entry_ptr)->size, \
- (new_size)); \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
\
/* Similarly, update the size of the clean or dirty LRU list as \
- * appropriate. At present, the entry must be clean, but that \
- * could change. \
+ * appropriate. At present, the entry must be clean, but that \
+ * could change. \
*/ \
\
if ( (entry_ptr)->is_dirty ) { \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (entry_ptr)->size, \
- (new_size)); \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
\
} else { \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (entry_ptr)->size, \
- (new_size)); \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
} \
\
/* End modified LRU specific code. */ \
@@ -3017,21 +3017,21 @@ if ( ( (cache_ptr)->index_size != \
\
if ( (entry_ptr)->is_pinned ) { \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
- (cache_ptr)->pel_size, \
- (entry_ptr)->size, \
- (new_size)); \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
\
} else { \
\
/* modified LRU specific code */ \
\
- /* Update the size of the LRU list */ \
+ /* Update the size of the LRU list */ \
\
- H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, \
- (entry_ptr)->size, \
- (new_size)); \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, \
+ (entry_ptr)->size, \
+ (new_size)); \
\
/* End modified LRU specific code. */ \
} \
@@ -3318,7 +3318,7 @@ if ( ( (hd_ptr) == NULL ) || \
( (Size) < (entry_ptr)->size ) || \
( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
- ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) ||\
( ( (len) == 1 ) && \
( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
( (entry_ptr)->coll_next == NULL ) && \
@@ -3350,10 +3350,10 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
) \
) { \
HDassert(0 && "COLL DLL sanity check failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed")\
}
-#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
+#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)\
if ( ( (entry_ptr) == NULL ) || \
( (entry_ptr)->coll_next != NULL ) || \
( (entry_ptr)->coll_prev != NULL ) || \
@@ -5074,7 +5074,7 @@ H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr);
H5_DLL herr_t H5C__load_cache_image(H5F_t *f);
H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
-H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed,
+H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed,
hbool_t write_permitted);
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f);
H5_DLL herr_t H5C__serialize_cache(H5F_t *f);
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index 8a1043e..9514443 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -2292,6 +2292,16 @@ H5_DLL herr_t H5C_cache_image_status(H5F_t *f, hbool_t *load_ci_ptr, hbool_t *
H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr);
H5_DLL herr_t H5C_get_mdc_image_info(const H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
+#if H5C_DO_SLIST_SANITY_CHECKS
+H5_DLL hbool_t H5C_entry_in_skip_list(H5C_t *cache_ptr, H5C_cache_entry_t *target_ptr);
+#endif
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+H5_DLL herr_t H5C_validate_lru_list(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_validate_pinned_entry_list(H5C_t *cache_ptr);
+H5_DLL herr_t H5C_validate_protected_entry_list(H5C_t *cache_ptr);
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
/* Logging functions */
H5_DLL herr_t H5C_start_logging(H5C_t *cache);
H5_DLL herr_t H5C_stop_logging(H5C_t *cache);
diff --git a/src/H5Cpublic.h b/src/H5Cpublic.h
index 79ece10..c65dc7c 100644
--- a/src/H5Cpublic.h
+++ b/src/H5Cpublic.h
@@ -27,10 +27,6 @@
/* Public headers needed by this file */
#include "H5public.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
enum H5C_cache_incr_mode {
H5C_incr__off,
/**<Automatic cache size increase is disabled, and the remaining increment fields are ignored.*/
@@ -61,7 +57,4 @@ enum H5C_cache_decr_mode {
/**<Automatic cache size decrease is enabled using the ageout with hit rate threshold algorithm.*/
};
-#ifdef __cplusplus
-}
-#endif
#endif
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index b85b194..4445911 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -59,6 +59,7 @@
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5MFprivate.h" /* File memory management */
+#include "H5PBprivate.h" /* Page Buffer */
#include "H5VMprivate.h" /* Vector and array functions */
/****************/
@@ -70,6 +71,7 @@
#define H5D_CHUNK_GET_NODE_INFO(map, node) \
(map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node))
#define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node))
+#define H5D_CHUNK_GET_NODE_COUNT(map) (map->use_single ? (size_t)1 : H5SL_count(map->sel_chunks))
/* Sanity check on chunk index types: commonly used by a lot of routines in this file */
#define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \
@@ -239,10 +241,14 @@ typedef struct H5D_chunk_file_iter_ud_t {
#ifdef H5_HAVE_PARALLEL
/* information to construct a collective I/O operation for filling chunks */
-typedef struct H5D_chunk_coll_info_t {
- size_t num_io; /* Number of write operations */
- haddr_t *addr; /* array of the file addresses of the write operation */
-} H5D_chunk_coll_info_t;
+typedef struct H5D_chunk_coll_fill_info_t {
+ size_t num_chunks; /* Number of chunks in the write operation */
+ struct chunk_coll_fill_info {
+ haddr_t addr; /* File address of the chunk */
+ size_t chunk_size; /* Size of the chunk in the file */
+ hbool_t unfiltered_partial_chunk;
+ } * chunk_info;
+} H5D_chunk_coll_fill_info_t;
#endif /* H5_HAVE_PARALLEL */
typedef struct H5D_chunk_iter_ud_t {
@@ -257,8 +263,8 @@ typedef struct H5D_chunk_iter_ud_t {
/* Chunked layout operation callbacks */
static herr_t H5D__chunk_construct(H5F_t *f, H5D_t *dset);
static herr_t H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id);
-static herr_t H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm);
+static herr_t H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
+ H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm);
static herr_t H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
H5D_chunk_map_t *fm);
static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
@@ -287,9 +293,6 @@ static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void *
/* Helper routines */
static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims,
const hsize_t *max_dims);
-static void * H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline);
-static void * H5D__chunk_mem_xfree(void *chk, const void *pline);
-static void * H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline);
static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last);
static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *udata);
static hbool_t H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *udata);
@@ -303,11 +306,10 @@ static herr_t H5D__chunk_file_cb(void *elem, const H5T_t *type, unsigned ndims
void *fm);
static herr_t H5D__chunk_mem_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords,
void *fm);
+static htri_t H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info);
static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled);
static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset);
static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush);
-static hbool_t H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims,
- const hsize_t *chunk_scaled, const hsize_t *dset_dims);
static void * H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, hbool_t relax,
hbool_t prev_unfilt_chunk);
static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata, hbool_t dirty,
@@ -315,9 +317,9 @@ static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_
static herr_t H5D__chunk_cache_prune(const H5D_t *dset, size_t size);
static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk);
#ifdef H5_HAVE_PARALLEL
-static herr_t H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
- size_t chunk_size, const void *fill_buf);
-static int H5D__chunk_cmp_addr(const void *addr1, const void *addr2);
+static herr_t H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_fill_info,
+ const void *fill_buf, const void *partial_chunk_fill_buf);
+static int H5D__chunk_cmp_coll_fill_info(const void *_entry1, const void *_entry2);
#endif /* H5_HAVE_PARALLEL */
/* Debugging helper routine callback */
@@ -1066,16 +1068,17 @@ H5D__chunk_is_data_cached(const H5D_shared_t *shared_dset)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
+H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm)
{
const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */
htri_t file_space_normalized = FALSE; /* File dataspace was normalized */
unsigned f_ndims; /* The number of dimensions of the file's dataspace */
- int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */
- unsigned u; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */
+ htri_t use_selection_io = FALSE; /* Whether to use selection I/O */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1129,6 +1132,11 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
if (H5D__chunk_io_init_selections(io_info, type_info, fm) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file and memory chunk selections")
+ /* Check if we're performing selection I/O and save the result */
+ if ((use_selection_io = H5D__chunk_may_use_select_io(io_info)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible")
+ io_info->use_select_io = (hbool_t)use_selection_io;
+
done:
/* Reset the global dataspace info */
fm->file_space = NULL;
@@ -1362,7 +1370,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static void *
+void *
H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline)
{
void *ret_value = NULL; /* Return value */
@@ -1393,7 +1401,7 @@ H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline)
*
*-------------------------------------------------------------------------
*/
-static void *
+void *
H5D__chunk_mem_xfree(void *chk, const void *_pline)
{
const H5O_pline_t *pline = (const H5O_pline_t *)_pline;
@@ -1417,7 +1425,7 @@ H5D__chunk_mem_xfree(void *chk, const void *_pline)
* calls H5D__chunk_mem_xfree and discards the return value.
*-------------------------------------------------------------------------
*/
-static void
+void
H5D__chunk_mem_free(void *chk, const void *_pline)
{
(void)H5D__chunk_mem_xfree(chk, _pline);
@@ -1437,7 +1445,7 @@ H5D__chunk_mem_free(void *chk, const void *_pline)
*
*-------------------------------------------------------------------------
*/
-static void *
+void *
H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline)
{
void *ret_value = NULL; /* Return value */
@@ -2460,6 +2468,78 @@ done:
} /* end H5D__chunk_cacheable() */
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_may_use_select_io
+ *
+ * Purpose: A small internal function to if it may be possible to use
+ * selection I/O.
+ *
+ * Return: TRUE or FALSE
+ *
+ * Programmer: Neil Fortner
+ * 4 May 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info)
+{
+ const H5D_t *dataset = NULL; /* Local pointer to dataset info */
+ htri_t ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(io_info);
+
+ dataset = io_info->dset;
+ HDassert(dataset);
+
+ /* Don't use selection I/O if it's globally disabled, there is a type
+ * conversion, or if there are filters on the dataset (for now) */
+ if (!H5_use_selection_io_g || io_info->io_ops.single_read != H5D__select_read ||
+ dataset->shared->dcpl_cache.pline.nused > 0)
+ ret_value = FALSE;
+ else {
+ hbool_t page_buf_enabled;
+
+ HDassert(io_info->io_ops.single_write == H5D__select_write);
+
+ /* Check if the page buffer is enabled */
+ if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled")
+ if (page_buf_enabled)
+ ret_value = FALSE;
+ else {
+ /* Check if chunks in this dataset may be cached, if so don't use
+ * selection I/O (for now). Note that chunks temporarily cached for
+ * the purpose of writing the fill value don't count, since they are
+ * immediately evicted. */
+#ifdef H5_HAVE_PARALLEL
+ /* If MPI based VFD is used and the file is opened for write access,
+ * must bypass the chunk-cache scheme because other MPI processes
+ * could be writing to other elements in the same chunk.
+ */
+ if (io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file)))
+ ret_value = TRUE;
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ /* Check if the chunk is too large to keep in the cache */
+ H5_CHECK_OVERFLOW(dataset->shared->layout.u.chunk.size, uint32_t, size_t);
+ if ((size_t)dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes_max)
+ ret_value = TRUE;
+ else
+ ret_value = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
+#endif /* H5_HAVE_PARALLEL */
+ } /* end else */
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_may_use_select_io() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_read
*
* Purpose: Read from a chunked dataset.
@@ -2475,16 +2555,17 @@ static herr_t
H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts,
H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm)
{
- H5SL_node_t * chunk_node; /* Current node in chunk skip list */
- H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */
- H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- H5D_io_info_t cpt_io_info; /* Compact I/O info object */
- H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
- hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
- uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
- hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
- herr_t ret_value = SUCCEED; /*return value */
+ H5SL_node_t * chunk_node; /* Current node in chunk skip list */
+ H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */
+ uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */
+ hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */
+ H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */
+ H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */
+ H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */
+ H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */
+ haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */
+ haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */
+ herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_STATIC
@@ -2498,23 +2579,6 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
H5MM_memcpy(&nonexistent_io_info, io_info, sizeof(nonexistent_io_info));
nonexistent_io_info.layout_ops = *H5D_LOPS_NONEXISTENT;
- /* Set up contiguous I/O info object */
- H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info));
- ctg_io_info.store = &ctg_store;
- ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
-
- /* Initialize temporary contiguous storage info */
- H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size,
- uint32_t);
-
- /* Set up compact I/O info object */
- H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
- cpt_io_info.store = &cpt_store;
- cpt_io_info.layout_ops = *H5D_LOPS_COMPACT;
-
- /* Initialize temporary compact storage info */
- cpt_store.compact.dirty = &cpt_dirty;
-
{
const H5O_fill_t *fill = &(io_info->dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* Fill value status */
@@ -2532,80 +2596,215 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_
skip_missing_chunks = TRUE;
}
- /* Iterate through nodes in chunk skip list */
- chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
- while (chunk_node) {
- H5D_chunk_info_t *chunk_info; /* Chunk information */
- H5D_chunk_ud_t udata; /* Chunk index pass-through */
+ /* Different blocks depending on whether we're using selection I/O */
+ if (io_info->use_select_io) {
+ size_t num_chunks;
+ size_t element_sizes[2] = {type_info->dst_type_size, 0};
+ void * bufs[2] = {io_info->u.rbuf, NULL};
+
+ /* Cache number of chunks */
+ num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm);
+
+ /* Allocate arrays of dataspaces and offsets for use with selection I/O,
+ * or point to static buffers */
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_file_spaces_static) / sizeof(chunk_file_spaces_static[0]));
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0]));
+ if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) {
+ if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for memory space list")
+ if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list")
+ if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for chunk address list")
+ } /* end if */
+ else {
+ chunk_mem_spaces = chunk_mem_spaces_static;
+ chunk_file_spaces = chunk_file_spaces_static;
+ chunk_addrs = chunk_addrs_static;
+ } /* end else */
- /* Get the actual chunk information from the skip list node */
- chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+ /* Reset num_chunks */
+ num_chunks = 0;
- /* Get the info for the chunk in the file */
- if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t *chunk_info; /* Chunk information */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */
- /* Sanity check */
- HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
- (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
- /* Check for non-existent chunk & skip it if appropriate */
- if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint ||
- !skip_missing_chunks) {
- H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
- void * chunk = NULL; /* Pointer to locked chunk buffer */
- htri_t cacheable; /* Whether the chunk is cacheable */
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
+ /* There should be no chunks cached */
+ HDassert(UINT_MAX == udata.idx_hint);
- /* Determine if we should use the chunk cache */
- if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
- if (cacheable) {
- /* Load the chunk into cache and lock it. */
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Check for non-existent chunk & skip it if appropriate */
+ if (H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Add chunk to list for selection I/O */
+ chunk_mem_spaces[num_chunks] = chunk_info->mspace;
+ chunk_file_spaces[num_chunks] = chunk_info->fspace;
+ chunk_addrs[num_chunks] = udata.chunk_block.offset;
+ num_chunks++;
+ } /* end if */
+ else if (!skip_missing_chunks) {
+ /* Perform the actual read operation from the nonexistent chunk
+ */
+ if ((io_info->io_ops.single_read)(&nonexistent_io_info, type_info,
+ (hsize_t)chunk_info->chunk_points, chunk_info->fspace,
+ chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+ } /* end if */
- /* Compute # of bytes accessed in chunk */
- H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
- src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
- /* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (H5F_shared_select_read(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, (uint32_t)num_chunks,
+ chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunk selection read failed")
+
+ /* Clean up memory */
+ if (chunk_mem_spaces != chunk_mem_spaces_static) {
+ HDassert(chunk_mem_spaces);
+ HDassert(chunk_file_spaces != chunk_file_spaces_static);
+ HDassert(chunk_addrs != chunk_addrs_static);
+ H5MM_free(chunk_mem_spaces);
+ chunk_mem_spaces = NULL;
+ H5MM_free(chunk_file_spaces);
+ chunk_file_spaces = NULL;
+ H5MM_free(chunk_addrs);
+ chunk_addrs = NULL;
+ } /* end if */
+ } /* end if */
+ else {
+ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ H5D_io_info_t cpt_io_info; /* Compact I/O info object */
+ H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
+ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
+
+ /* Set up contiguous I/O info object */
+ H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info));
+ ctg_io_info.store = &ctg_store;
+ ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;
+
+ /* Initialize temporary contiguous storage info */
+ H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size,
+ uint32_t);
+
+ /* Set up compact I/O info object */
+ H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
+ cpt_io_info.store = &cpt_store;
+ cpt_io_info.layout_ops = *H5D_LOPS_COMPACT;
+
+ /* Initialize temporary compact storage info */
+ cpt_store.compact.dirty = &cpt_dirty;
+
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t *chunk_info; /* Chunk information */
+ H5D_chunk_ud_t udata; /* Chunk index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- /* Set up the storage buffer information for this chunk */
- cpt_store.compact.buf = chunk;
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
- /* Point I/O info at contiguous I/O info for this chunk */
- chk_io_info = &cpt_io_info;
- } /* end if */
- else if (H5F_addr_defined(udata.chunk_block.offset)) {
- /* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.chunk_block.offset;
+ /* Check for non-existent chunk & skip it if appropriate */
+ if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint ||
+ !skip_missing_chunks) {
+ H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
+ void * chunk = NULL; /* Pointer to locked chunk buffer */
- /* Point I/O info at temporary I/O info for this chunk */
- chk_io_info = &ctg_io_info;
- } /* end else if */
- else {
- /* Point I/O info at "nonexistent" I/O info for this chunk */
- chk_io_info = &nonexistent_io_info;
- } /* end else */
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
- /* Perform the actual read operation */
- if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
- chunk_info->fspace, chunk_info->mspace) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache and lock it. */
- /* Release the cache lock on the chunk. */
- if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
- } /* end if */
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size;
- /* Advance to next chunk in list */
- chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
- } /* end while */
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Point I/O info at contiguous I/O info for this chunk */
+ chk_io_info = &cpt_io_info;
+ } /* end if */
+ else if (H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Set up the storage address information for this chunk */
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
+
+ /* Point I/O info at temporary I/O info for this chunk */
+ chk_io_info = &ctg_io_info;
+ } /* end else if */
+ else {
+ /* Point I/O info at "nonexistent" I/O info for this chunk */
+ chk_io_info = &nonexistent_io_info;
+ } /* end else */
+
+ /* Perform the actual read operation */
+ if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed")
+
+ /* Release the cache lock on the chunk. */
+ if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+ } /* end else */
done:
+ /* Cleanup on failure */
+ if (ret_value < 0) {
+ if (chunk_mem_spaces != chunk_mem_spaces_static)
+ chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces);
+ if (chunk_file_spaces != chunk_file_spaces_static)
+ chunk_file_spaces = H5MM_xfree(chunk_file_spaces);
+ if (chunk_addrs != chunk_addrs_static)
+ chunk_addrs = H5MM_xfree(chunk_addrs);
+ } /* end if */
+
+ /* Make sure we cleaned up */
+ HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static);
+ HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static);
+ HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__chunk_read() */
@@ -2625,14 +2824,20 @@ static herr_t
H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts,
H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm)
{
- H5SL_node_t * chunk_node; /* Current node in chunk skip list */
- H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- H5D_io_info_t cpt_io_info; /* Compact I/O info object */
- H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
- hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
- uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SL_node_t * chunk_node; /* Current node in chunk skip list */
+ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ H5D_io_info_t cpt_io_info; /* Compact I/O info object */
+ H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */
+ hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */
+ uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */
+ H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */
+ H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */
+ H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */
+ H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */
+ haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */
+ haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2659,116 +2864,295 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
/* Initialize temporary compact storage info */
cpt_store.compact.dirty = &cpt_dirty;
- /* Iterate through nodes in chunk skip list */
- chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
- while (chunk_node) {
- H5D_chunk_info_t * chunk_info; /* Chunk information */
- H5D_chk_idx_info_t idx_info; /* Chunked index info */
- H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */
- void * chunk; /* Pointer to locked chunk buffer */
- H5D_chunk_ud_t udata; /* Index pass-through */
- htri_t cacheable; /* Whether the chunk is cacheable */
- hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
+ /* Different blocks depending on whether we're using selection I/O */
+ if (io_info->use_select_io) {
+ size_t num_chunks;
+ size_t element_sizes[2] = {type_info->dst_type_size, 0};
+ const void *bufs[2] = {io_info->u.wbuf, NULL};
+
+ /* Cache number of chunks */
+ num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm);
+
+ /* Allocate arrays of dataspaces and offsets for use with selection I/O,
+ * or point to static buffers */
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_file_spaces_static) / sizeof(chunk_file_spaces_static[0]));
+ HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) ==
+ sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0]));
+ if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) {
+ if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for memory space list")
+ if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list")
+ if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for chunk address list")
+ } /* end if */
+ else {
+ chunk_mem_spaces = chunk_mem_spaces_static;
+ chunk_file_spaces = chunk_file_spaces_static;
+ chunk_addrs = chunk_addrs_static;
+ } /* end else */
- /* Get the actual chunk information from the skip list node */
- chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+ /* Reset num_chunks */
+ num_chunks = 0;
- /* Look up the chunk */
- if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t * chunk_info; /* Chunk information */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5D_chunk_ud_t udata; /* Index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
- /* Sanity check */
- HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
- (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
-
- /* Set chunk's [scaled] coordinates */
- io_info->store->chunk.scaled = chunk_info->scaled;
-
- /* Determine if we should use the chunk cache */
- if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
- if (cacheable) {
- /* Load the chunk into cache. But if the whole chunk is written,
- * simply allocate space instead of load the chunk. */
- hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
-
- /* Compute # of bytes accessed in chunk */
- H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
- dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
-
- /* Determine if we will access all the data in the chunk */
- if (dst_accessed_bytes != ctg_store.contig.dset_size ||
- (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
- fm->fsel_type == H5S_SEL_POINTS)
- entire_chunk = FALSE;
-
- /* Lock the chunk into the cache */
- if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
-
- /* Set up the storage buffer information for this chunk */
- cpt_store.compact.buf = chunk;
-
- /* Point I/O info at main I/O info for this chunk */
- chk_io_info = &cpt_io_info;
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Get the info for the chunk in the file */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* There should be no chunks cached */
+ HDassert(UINT_MAX == udata.idx_hint);
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache. But if the whole chunk is written,
+ * simply allocate space instead of load the chunk. */
+ void * chunk; /* Pointer to locked chunk buffer */
+ hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
+
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
+
+ /* Determine if we will access all the data in the chunk */
+ if (dst_accessed_bytes != ctg_store.contig.dset_size ||
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
+ entire_chunk = FALSE;
+
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Perform the actual write operation */
+ if ((io_info->io_ops.single_write)(&cpt_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
+
+ /* Release the cache lock on the chunk */
+ if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+ else {
+ /* If the chunk hasn't been allocated on disk, do so now. */
+ if (!H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Compose chunked index info struct */
+ idx_info.f = io_info->dset->oloc.file;
+ idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
+ idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
+ idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
+
+ /* Set up the size of chunk for user data */
+ udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
+
+ /* Allocate the chunk */
+ if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
+ chunk_info->scaled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert/resize chunk on chunk level")
+
+ /* Make sure the address of the chunk is returned. */
+ if (!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Cache the new chunk information */
+ H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+
+ /* Insert chunk into index */
+ if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata,
+ NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert chunk addr into index")
+ } /* end if */
+
+ /* Add chunk to list for selection I/O */
+ chunk_mem_spaces[num_chunks] = chunk_info->mspace;
+ chunk_file_spaces[num_chunks] = chunk_info->fspace;
+ chunk_addrs[num_chunks] = udata.chunk_block.offset;
+ num_chunks++;
+ } /* end else */
+
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (H5F_shared_select_write(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, (uint32_t)num_chunks,
+ chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes,
+ bufs) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "chunk selection write failed")
+
+ /* Clean up memory */
+ if (chunk_mem_spaces != chunk_mem_spaces_static) {
+ HDassert(chunk_mem_spaces);
+ HDassert(chunk_file_spaces != chunk_file_spaces_static);
+ HDassert(chunk_addrs != chunk_addrs_static);
+ H5MM_free(chunk_mem_spaces);
+ chunk_mem_spaces = NULL;
+ H5MM_free(chunk_file_spaces);
+ chunk_file_spaces = NULL;
+ H5MM_free(chunk_addrs);
+ chunk_addrs = NULL;
} /* end if */
- else {
- /* If the chunk hasn't been allocated on disk, do so now. */
- if (!H5F_addr_defined(udata.chunk_block.offset)) {
- /* Compose chunked index info struct */
- idx_info.f = io_info->dset->oloc.file;
- idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
- idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
- idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
-
- /* Set up the size of chunk for user data */
- udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
-
- /* Allocate the chunk */
- if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
- chunk_info->scaled) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
- "unable to insert/resize chunk on chunk level")
-
- /* Make sure the address of the chunk is returned. */
- if (!H5F_addr_defined(udata.chunk_block.offset))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
-
- /* Cache the new chunk information */
- H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+ } /* end if */
+ else {
+ /* Iterate through nodes in chunk skip list */
+ chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm);
+ while (chunk_node) {
+ H5D_chunk_info_t * chunk_info; /* Chunk information */
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */
+ void * chunk; /* Pointer to locked chunk buffer */
+ H5D_chunk_ud_t udata; /* Index pass-through */
+ htri_t cacheable; /* Whether the chunk is cacheable */
+ hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */
+
+ /* Get the actual chunk information from the skip list node */
+ chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node);
+
+ /* Look up the chunk */
+ if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Set chunk's [scaled] coordinates */
+ io_info->store->chunk.scaled = chunk_info->scaled;
+
+ /* Determine if we should use the chunk cache */
+ if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable")
+ if (cacheable) {
+ /* Load the chunk into cache. But if the whole chunk is written,
+ * simply allocate space instead of load the chunk. */
+ hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
+
+ /* Compute # of bytes accessed in chunk */
+ H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t);
+ dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size;
+
+ /* Determine if we will access all the data in the chunk */
+ if (dst_accessed_bytes != ctg_store.contig.dset_size ||
+ (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size ||
+ fm->fsel_type == H5S_SEL_POINTS)
+ entire_chunk = FALSE;
+
+ /* Lock the chunk into the cache */
+ if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE)))
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Set up the storage buffer information for this chunk */
+ cpt_store.compact.buf = chunk;
+
+ /* Point I/O info at main I/O info for this chunk */
+ chk_io_info = &cpt_io_info;
} /* end if */
+ else {
+ /* If the chunk hasn't been allocated on disk, do so now. */
+ if (!H5F_addr_defined(udata.chunk_block.offset)) {
+ /* Compose chunked index info struct */
+ idx_info.f = io_info->dset->oloc.file;
+ idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
+ idx_info.layout = &(io_info->dset->shared->layout.u.chunk);
+ idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
+
+ /* Set up the size of chunk for user data */
+ udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size;
+
+ /* Allocate the chunk */
+ if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert,
+ chunk_info->scaled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert/resize chunk on chunk level")
+
+ /* Make sure the address of the chunk is returned. */
+ if (!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Cache the new chunk information */
+ H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata);
+ } /* end if */
- /* Set up the storage address information for this chunk */
- ctg_store.contig.dset_addr = udata.chunk_block.offset;
+ /* Set up the storage address information for this chunk */
+ ctg_store.contig.dset_addr = udata.chunk_block.offset;
- /* No chunk cached */
- chunk = NULL;
+ /* No chunk cached */
+ chunk = NULL;
- /* Point I/O info at temporary I/O info for this chunk */
- chk_io_info = &ctg_io_info;
- } /* end else */
+ /* Point I/O info at temporary I/O info for this chunk */
+ chk_io_info = &ctg_io_info;
+ } /* end else */
- /* Perform the actual write operation */
- if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
- chunk_info->fspace, chunk_info->mspace) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
+ /* Perform the actual write operation */
+ if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points,
+ chunk_info->fspace, chunk_info->mspace) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed")
- /* Release the cache lock on the chunk, or insert chunk into index. */
- if (chunk) {
- if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
- } /* end if */
- else {
- if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
- if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
- } /* end else */
+ /* Release the cache lock on the chunk, or insert chunk into index. */
+ if (chunk) {
+ if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
+ } /* end if */
+ else {
+ if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert)
+ if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) <
+ 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
+ "unable to insert chunk addr into index")
+ } /* end else */
- /* Advance to next chunk in list */
- chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
- } /* end while */
+ /* Advance to next chunk in list */
+ chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node);
+ } /* end while */
+ } /* end else */
done:
+ /* Cleanup on failure */
+ if (ret_value < 0) {
+ if (chunk_mem_spaces != chunk_mem_spaces_static)
+ chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces);
+ if (chunk_file_spaces != chunk_file_spaces_static)
+ chunk_file_spaces = H5MM_xfree(chunk_file_spaces);
+ if (chunk_addrs != chunk_addrs_static)
+ chunk_addrs = H5MM_xfree(chunk_addrs);
+ } /* end if */
+
+ /* Make sure we cleaned up */
+ HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static);
+ HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static);
+ HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static);
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5D__chunk_write() */
@@ -3178,7 +3562,9 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat
unsigned idx = 0; /* Index of chunk in cache, if present */
hbool_t found = FALSE; /* In cache? */
#ifdef H5_HAVE_PARALLEL
- hbool_t reenable_coll_md_reads = FALSE;
+ H5P_coll_md_read_flag_t md_reads_file_flag;
+ hbool_t md_reads_context_flag;
+ hbool_t restore_md_reads_state = FALSE;
#endif
herr_t ret_value = SUCCEED; /* Return value */
@@ -3252,11 +3638,10 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat
* processes.
*/
if (H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) {
- hbool_t do_coll_md_reads = H5CX_get_coll_metadata_read();
- if (do_coll_md_reads) {
- H5CX_set_coll_metadata_read(FALSE);
- reenable_coll_md_reads = TRUE;
- }
+ md_reads_file_flag = H5P_FORCE_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(idx_info.f, &md_reads_file_flag, &md_reads_context_flag);
+ restore_md_reads_state = TRUE;
}
#endif /* H5_HAVE_PARALLEL */
@@ -3302,8 +3687,8 @@ H5D__chunk_lookup(const H5D_t *dset, const hsize_t *scaled, H5D_chunk_ud_t *udat
done:
#ifdef H5_HAVE_PARALLEL
/* Re-enable collective metadata reads if we disabled them */
- if (reenable_coll_md_reads)
- H5CX_set_coll_metadata_read(TRUE);
+ if (restore_md_reads_state)
+ H5F_set_coll_metadata_reads(dset->oloc.file, &md_reads_file_flag, &md_reads_context_flag);
#endif /* H5_HAVE_PARALLEL */
FUNC_LEAVE_NOAPI(ret_value)
@@ -4319,8 +4704,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, const
hbool_t blocks_written = FALSE; /* Flag to indicate that chunk was actually written */
hbool_t using_mpi =
FALSE; /* Flag to indicate that the file is being accessed with an MPI-capable file driver */
- H5D_chunk_coll_info_t chunk_info; /* chunk address information for doing I/O */
-#endif /* H5_HAVE_PARALLEL */
+ H5D_chunk_coll_fill_info_t chunk_fill_info; /* chunk address information for doing I/O */
+#endif /* H5_HAVE_PARALLEL */
hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
unsigned space_ndims; /* Dataset's space rank */
const hsize_t * space_dim; /* Dataset's dataspace dimensions */
@@ -4367,8 +4752,8 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, const
using_mpi = TRUE;
/* init chunk info stuff for collective I/O */
- chunk_info.num_io = 0;
- chunk_info.addr = NULL;
+ chunk_fill_info.num_chunks = 0;
+ chunk_fill_info.chunk_info = NULL;
} /* end if */
#endif /* H5_HAVE_PARALLEL */
@@ -4640,19 +5025,26 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, const
if (using_mpi) {
/* collect all chunk addresses to be written to
write collectively at the end */
- /* allocate/resize address array if no more space left */
- /* Note that if we add support for parallel filters we must
- * also store an array of chunk sizes and pass it to the
- * apporpriate collective write function */
- if (0 == chunk_info.num_io % 1024)
- if (NULL == (chunk_info.addr = (haddr_t *)H5MM_realloc(
- chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t))))
+
+ /* allocate/resize chunk info array if no more space left */
+ if (0 == chunk_fill_info.num_chunks % 1024) {
+ void *tmp_realloc;
+
+ if (NULL == (tmp_realloc = H5MM_realloc(chunk_fill_info.chunk_info,
+ (chunk_fill_info.num_chunks + 1024) *
+ sizeof(struct chunk_coll_fill_info))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "memory allocation failed for chunk addresses")
+ "memory allocation failed for chunk fill info")
+
+ chunk_fill_info.chunk_info = tmp_realloc;
+ }
- /* Store the chunk's address for later */
- chunk_info.addr[chunk_info.num_io] = udata.chunk_block.offset;
- chunk_info.num_io++;
+ /* Store info about the chunk for later */
+ chunk_fill_info.chunk_info[chunk_fill_info.num_chunks].addr = udata.chunk_block.offset;
+ chunk_fill_info.chunk_info[chunk_fill_info.num_chunks].chunk_size = chunk_size;
+ chunk_fill_info.chunk_info[chunk_fill_info.num_chunks].unfiltered_partial_chunk =
+ (*fill_buf == unfilt_fill_buf);
+ chunk_fill_info.num_chunks++;
/* Indicate that blocks will be written */
blocks_written = TRUE;
@@ -4725,7 +5117,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, const
#ifdef H5_HAVE_PARALLEL
/* do final collective I/O */
if (using_mpi && blocks_written)
- if (H5D__chunk_collective_fill(dset, &chunk_info, chunk_size, fb_info.fill_buf) < 0)
+ if (H5D__chunk_collective_fill(dset, &chunk_fill_info, fb_info.fill_buf, unfilt_fill_buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#endif /* H5_HAVE_PARALLEL */
@@ -4741,8 +5133,8 @@ done:
unfilt_fill_buf = H5D__chunk_mem_xfree(unfilt_fill_buf, &def_pline);
#ifdef H5_HAVE_PARALLEL
- if (using_mpi && chunk_info.addr)
- H5MM_free(chunk_info.addr);
+ if (using_mpi && chunk_fill_info.chunk_info)
+ H5MM_free(chunk_fill_info.chunk_info);
#endif
FUNC_LEAVE_NOAPI(ret_value)
@@ -4936,27 +5328,35 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info, size_t chunk_size,
- const void *fill_buf)
+H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_fill_info_t *chunk_fill_info,
+ const void *fill_buf, const void *partial_chunk_fill_buf)
{
- MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */
- int mpi_rank = (-1); /* This process's rank */
- int mpi_size = (-1); /* MPI Comm size */
- int mpi_code; /* MPI return code */
- size_t num_blocks; /* Number of blocks between processes. */
- size_t leftover_blocks; /* Number of leftover blocks to handle */
- int blocks, leftover, block_len; /* converted to int for MPI */
+ MPI_Comm mpi_comm = MPI_COMM_NULL; /* MPI communicator for file */
+ int mpi_rank = (-1); /* This process's rank */
+ int mpi_size = (-1); /* MPI Comm size */
+ int mpi_code; /* MPI return code */
+ size_t num_blocks; /* Number of blocks between processes. */
+ size_t leftover_blocks; /* Number of leftover blocks to handle */
+ int blocks, leftover; /* converted to int for MPI */
MPI_Aint * chunk_disp_array = NULL;
+ MPI_Aint * block_disps = NULL;
int * block_lens = NULL;
MPI_Datatype mem_type = MPI_BYTE, file_type = MPI_BYTE;
H5FD_mpio_xfer_t prev_xfer_mode; /* Previous data xfer mode */
hbool_t have_xfer_mode = FALSE; /* Whether the previous xffer mode has been retrieved */
- hbool_t need_addr_sort = FALSE;
- int i; /* Local index variable */
+ hbool_t need_sort = FALSE;
+ size_t i; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
+ /*
+ * If a separate fill buffer is provided for partial chunks, ensure
+ * that the "don't filter partial edge chunks" flag is set.
+ */
+ if (partial_chunk_fill_buf)
+ HDassert(dset->shared->layout.u.chunk.flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
/* Get the MPI communicator */
if (MPI_COMM_NULL == (mpi_comm = H5F_mpi_get_comm(dset->oloc.file)))
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Can't retrieve MPI communicator")
@@ -4972,39 +5372,89 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
/* Distribute evenly the number of blocks between processes. */
if (mpi_size == 0)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Resulted in division by zero")
- num_blocks = (size_t)(chunk_info->num_io / (size_t)mpi_size); /* value should be the same on all procs */
+ num_blocks =
+ (size_t)(chunk_fill_info->num_chunks / (size_t)mpi_size); /* value should be the same on all procs */
/* After evenly distributing the blocks between processes, are there any
* leftover blocks for each individual process (round-robin)?
*/
- leftover_blocks = (size_t)(chunk_info->num_io % (size_t)mpi_size);
+ leftover_blocks = (size_t)(chunk_fill_info->num_chunks % (size_t)mpi_size);
/* Cast values to types needed by MPI */
H5_CHECKED_ASSIGN(blocks, int, num_blocks, size_t);
H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t);
- H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t);
/* Check if we have any chunks to write on this rank */
if (num_blocks > 0 || (leftover && leftover > mpi_rank)) {
+ MPI_Aint partial_fill_buf_disp = 0;
+ hbool_t all_same_block_len = TRUE;
+
/* Allocate buffers */
- /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
- if (NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
if (NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
- for (i = 0; i < blocks; i++) {
- /* store the chunk address as an MPI_Aint */
- chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + (mpi_rank * blocks)]);
+ if (partial_chunk_fill_buf) {
+ MPI_Aint fill_buf_addr;
+ MPI_Aint partial_fill_buf_addr;
+
+ /* Calculate the displacement between the fill buffer and partial chunk fill buffer */
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(fill_buf, &fill_buf_addr)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address failed", mpi_code)
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(partial_chunk_fill_buf, &partial_fill_buf_addr)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address failed", mpi_code)
- /* MSC - should not need this if MPI_type_create_hindexed_block is working */
- block_lens[i] = block_len;
+#if H5_CHECK_MPI_VERSION(3, 1)
+ partial_fill_buf_disp = MPI_Aint_diff(partial_fill_buf_addr, fill_buf_addr);
+#else
+ partial_fill_buf_disp = partial_fill_buf_addr - fill_buf_addr;
+#endif
- /* Make sure that the addresses in the datatype are
- * monotonically non-decreasing
+ /*
+ * Allocate all-zero block displacements array. If a block's displacement
+ * is left as zero, that block will be written to from the regular fill
+ * buffer. If a block represents an unfiltered partial edge chunk, its
+ * displacement will be set so that the block is written to from the
+ * unfiltered fill buffer.
*/
- if (i && (chunk_disp_array[i] < chunk_disp_array[i - 1]))
- need_addr_sort = TRUE;
+ if (NULL == (block_disps = (MPI_Aint *)H5MM_calloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate block displacements buffer")
+ }
+
+ /*
+ * Perform initial scan of chunk info list to:
+ * - make sure that chunk addresses are monotonically non-decreasing
+ * - check if all blocks have the same length
+ */
+ for (i = 1; i < chunk_fill_info->num_chunks; i++) {
+ if (chunk_fill_info->chunk_info[i].addr < chunk_fill_info->chunk_info[i - 1].addr)
+ need_sort = TRUE;
+
+ if (chunk_fill_info->chunk_info[i].chunk_size != chunk_fill_info->chunk_info[i - 1].chunk_size)
+ all_same_block_len = FALSE;
+ }
+
+ if (need_sort)
+ HDqsort(chunk_fill_info->chunk_info, chunk_fill_info->num_chunks,
+ sizeof(struct chunk_coll_fill_info), H5D__chunk_cmp_coll_fill_info);
+
+ /* Allocate buffer for block lengths if necessary */
+ if (!all_same_block_len)
+ if (NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
+
+ for (i = 0; i < (size_t)blocks; i++) {
+ size_t idx = i + (size_t)(mpi_rank * blocks);
+
+ /* store the chunk address as an MPI_Aint */
+ chunk_disp_array[i] = (MPI_Aint)(chunk_fill_info->chunk_info[idx].addr);
+
+ if (!all_same_block_len)
+ H5_CHECKED_ASSIGN(block_lens[i], int, chunk_fill_info->chunk_info[idx].chunk_size, size_t);
+
+ if (chunk_fill_info->chunk_info[idx].unfiltered_partial_chunk) {
+ HDassert(partial_chunk_fill_buf);
+ block_disps[i] = partial_fill_buf_disp;
+ }
} /* end for */
/* Calculate if there are any leftover blocks after evenly
@@ -5012,32 +5462,71 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
* to processes 0 -> leftover.
*/
if (leftover && leftover > mpi_rank) {
- chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[(blocks * mpi_size) + mpi_rank];
- if (blocks && (chunk_disp_array[blocks] < chunk_disp_array[blocks - 1]))
- need_addr_sort = TRUE;
- block_lens[blocks] = block_len;
+ chunk_disp_array[blocks] =
+ (MPI_Aint)chunk_fill_info->chunk_info[(blocks * mpi_size) + mpi_rank].addr;
+
+ if (!all_same_block_len)
+ H5_CHECKED_ASSIGN(block_lens[blocks], int,
+ chunk_fill_info->chunk_info[(blocks * mpi_size) + mpi_rank].chunk_size,
+ size_t);
+
+ if (chunk_fill_info->chunk_info[(blocks * mpi_size) + mpi_rank].unfiltered_partial_chunk) {
+ HDassert(partial_chunk_fill_buf);
+ block_disps[blocks] = partial_fill_buf_disp;
+ }
+
blocks++;
}
- /* Ensure that the blocks are sorted in monotonically non-decreasing
- * order of offset in the file.
- */
- if (need_addr_sort)
- HDqsort(chunk_disp_array, (size_t)blocks, sizeof(MPI_Aint), H5D__chunk_cmp_addr);
+ /* Create file and memory types for the write operation */
+ if (all_same_block_len) {
+ int block_len;
+
+ H5_CHECKED_ASSIGN(block_len, int, chunk_fill_info->chunk_info[0].chunk_size, size_t);
+
+ mpi_code =
+ MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
+ if (mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code)
+
+ if (partial_chunk_fill_buf) {
+ /*
+ * If filters are disabled for partial edge chunks, those chunks could
+ * potentially have the same block length as the other chunks, but still
+ * need to be written to using the unfiltered fill buffer. Use an hindexed
+ * block type rather than an hvector.
+ */
+ mpi_code =
+ MPI_Type_create_hindexed_block(blocks, block_len, block_disps, MPI_BYTE, &mem_type);
+ if (mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code)
+ }
+ else {
+ mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
+ if (mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ }
+ }
+ else {
+ /*
+ * Currently, different block lengths implies that there are partial
+ * edge chunks and the "don't filter partial edge chunks" flag is set.
+ */
+ HDassert(partial_chunk_fill_buf);
+ HDassert(block_lens);
+ HDassert(block_disps);
+
+ mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
+ if (mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+
+ mpi_code = MPI_Type_create_hindexed(blocks, block_lens, block_disps, MPI_BYTE, &mem_type);
+ if (mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+ }
- /* MSC - should use this if MPI_type_create_hindexed block is working:
- * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE,
- * &file_type);
- */
- mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
- if (mpi_code != MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
-
- mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
- if (mpi_code != MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
} /* end if */
@@ -5080,39 +5569,25 @@ done:
if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
H5MM_xfree(chunk_disp_array);
+ H5MM_xfree(block_disps);
H5MM_xfree(block_lens);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_collective_fill() */
static int
-H5D__chunk_cmp_addr(const void *addr1, const void *addr2)
+H5D__chunk_cmp_coll_fill_info(const void *_entry1, const void *_entry2)
{
- MPI_Aint _addr1 = (MPI_Aint)0, _addr2 = (MPI_Aint)0;
- int ret_value = 0;
+ const struct chunk_coll_fill_info *entry1;
+ const struct chunk_coll_fill_info *entry2;
FUNC_ENTER_STATIC_NOERR
- _addr1 = *((const MPI_Aint *)addr1);
- _addr2 = *((const MPI_Aint *)addr2);
-
-#if MPI_VERSION >= 3 && MPI_SUBVERSION >= 1
- {
- MPI_Aint diff = MPI_Aint_diff(_addr1, _addr2);
-
- if (diff < (MPI_Aint)0)
- ret_value = -1;
- else if (diff > (MPI_Aint)0)
- ret_value = 1;
- else
- ret_value = 0;
- }
-#else
- ret_value = (_addr1 > _addr2) - (_addr1 < _addr2);
-#endif
+ entry1 = (const struct chunk_coll_fill_info *)_entry1;
+ entry2 = (const struct chunk_coll_fill_info *)_entry2;
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__chunk_cmp_addr() */
+ FUNC_LEAVE_NOAPI(H5F_addr_cmp(entry1->addr, entry2->addr))
+} /* end H5D__chunk_cmp_coll_fill_info() */
#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
@@ -6696,10 +7171,10 @@ H5D__chunk_stats(const H5D_t *dset, hbool_t headers)
miss_rate = 0.0;
}
if (miss_rate > 100) {
- HDsprintf(ascii, "%7d%%", (int)(miss_rate + 0.5));
+ HDsnprintf(ascii, sizeof(ascii), "%7d%%", (int)(miss_rate + 0.5));
}
else {
- HDsprintf(ascii, "%7.2f%%", miss_rate);
+ HDsnprintf(ascii, sizeof(ascii), "%7.2f%%", miss_rate);
}
HDfprintf(H5DEBUG(AC), " %-18s %8u %8u %7s %8d+%-9ld\n", "raw data chunks", rdcc->stats.nhits,
@@ -6826,7 +7301,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-static hbool_t
+hbool_t
H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims, const hsize_t scaled[],
const hsize_t *dset_dims)
{
@@ -7121,6 +7596,89 @@ done:
} /* end H5D__chunk_format_convert() */
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_index_empty_cb
+ *
+ * Purpose: Callback function that simply stops iteration and sets the
+ * `empty` parameter to FALSE if called. If this callback is
+ * entered, it means that the chunk index contains at least
+ * one chunk, so is not empty.
+ *
+ * Return: H5_ITER_STOP
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__chunk_index_empty_cb(const H5D_chunk_rec_t H5_ATTR_UNUSED *chunk_rec, void *_udata)
+{
+ hbool_t *empty = (hbool_t *)_udata;
+ int ret_value = H5_ITER_STOP;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ *empty = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_index_empty_cb() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_index_empty
+ *
+ * Purpose: Determines whether a chunk index is empty (has no chunks
+ * inserted into it yet).
+ *
+ * Note: This routine is meant to be a little more performant than
+ * just counting the number of chunks in the index. In the
+ * future, this is probably a callback that the chunk index
+ * ops structure should provide.
+ *
+ * Return: Non-negative on Success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__chunk_index_empty(const H5D_t *dset, hbool_t *empty)
+{
+ H5D_chk_idx_info_t idx_info; /* Chunked index info */
+ H5D_rdcc_ent_t * ent; /* Cache entry */
+ const H5D_rdcc_t * rdcc = NULL; /* Raw data chunk cache */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr)
+
+ HDassert(dset);
+ HDassert(dset->shared);
+ HDassert(empty);
+
+ rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */
+ HDassert(rdcc);
+
+ /* Search for cached chunks that haven't been written out */
+ for (ent = rdcc->head; ent; ent = ent->next)
+ /* Flush the chunk out to disk, to make certain the size is correct later */
+ if (H5D__chunk_flush_entry(dset, ent, FALSE) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
+
+ /* Compose chunked index info struct */
+ idx_info.f = dset->oloc.file;
+ idx_info.pline = &dset->shared->dcpl_cache.pline;
+ idx_info.layout = &dset->shared->layout.u.chunk;
+ idx_info.storage = &dset->shared->layout.storage.u.chunk;
+
+ *empty = TRUE;
+
+ if (H5F_addr_defined(idx_info.storage->idx_addr)) {
+ /* Iterate over the allocated chunks */
+ if ((dset->shared->layout.storage.u.chunk.ops->iterate)(&idx_info, H5D__chunk_index_empty_cb, empty) <
+ 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "unable to retrieve allocated chunk information from index")
+ }
+
+done:
+ FUNC_LEAVE_NOAPI_TAG(ret_value)
+} /* end H5D__chunk_index_empty() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__get_num_chunks_cb
*
* Purpose: Callback function that increments the number of written
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 356a54e..1ac1267 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -63,8 +63,8 @@ typedef struct H5D_compact_iovv_memmanage_ud_t {
/* Layout operation callbacks */
static herr_t H5D__compact_construct(H5F_t *f, H5D_t *dset);
static hbool_t H5D__compact_is_space_alloc(const H5O_storage_t *storage);
-static herr_t H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
+static herr_t H5D__compact_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
+ H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
static herr_t H5D__compact_iovv_memmanage_cb(hsize_t dst_off, hsize_t src_off, size_t len, void *_udata);
static ssize_t H5D__compact_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq,
size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq,
@@ -247,7 +247,7 @@ H5D__compact_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
+H5D__compact_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space,
H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm)
{
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index 2ace14b..840c7ec 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -43,6 +43,7 @@
#include "H5FOprivate.h" /* File objects */
#include "H5Oprivate.h" /* Object headers */
#include "H5Pprivate.h" /* Property lists */
+#include "H5PBprivate.h" /* Page Buffer */
#include "H5VMprivate.h" /* Vector and array functions */
/****************/
@@ -90,8 +91,8 @@ typedef struct H5D_contig_writevv_ud_t {
/* Layout operation callbacks */
static herr_t H5D__contig_construct(H5F_t *f, H5D_t *dset);
static herr_t H5D__contig_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id);
-static herr_t H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
+static herr_t H5D__contig_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
+ H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
static ssize_t H5D__contig_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq,
size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq,
size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]);
@@ -102,6 +103,7 @@ static herr_t H5D__contig_flush(H5D_t *dset);
/* Helper routines */
static herr_t H5D__contig_write_one(H5D_io_info_t *io_info, hsize_t offset, size_t size);
+static htri_t H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, H5D_io_op_type_t op_type);
/*********************/
/* Package Variables */
@@ -278,9 +280,16 @@ H5D__contig_fill(const H5D_io_info_t *io_info)
if (using_mpi) {
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
- if (H5_PAR_META_WRITE == mpi_rank)
- if (H5D__contig_write_one(&ioinfo, offset, size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset")
+ if (H5_PAR_META_WRITE == mpi_rank) {
+ if (H5D__contig_write_one(&ioinfo, offset, size) < 0) {
+ /* If writing fails, push an error and stop writing, but
+ * still participate in following MPI_Barrier.
+ */
+ blocks_written = TRUE;
+ HDONE_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to write fill value to dataset")
+ break;
+ }
+ }
/* Indicate that blocks are being written */
blocks_written = TRUE;
@@ -559,19 +568,81 @@ H5D__contig_is_data_cached(const H5D_shared_t *shared_dset)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
+H5D__contig_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space,
H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm)
{
- FUNC_ENTER_STATIC_NOERR
+ htri_t use_selection_io = FALSE; /* Whether to use selection I/O */
+ htri_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
io_info->store->contig.dset_addr = io_info->dset->shared->layout.storage.u.contig.addr;
io_info->store->contig.dset_size = io_info->dset->shared->layout.storage.u.contig.size;
- FUNC_LEAVE_NOAPI(SUCCEED)
+ /* Check if we're performing selection I/O */
+ if ((use_selection_io = H5D__contig_may_use_select_io(io_info, H5D_IO_OP_READ)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible")
+ io_info->use_select_io = (hbool_t)use_selection_io;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__contig_io_init() */
/*-------------------------------------------------------------------------
+ * Function: H5D__contig_may_use_select_io
+ *
+ * Purpose: A small internal function to check if it may be possible to use
+ * selection I/O.
+ *
+ * Return: TRUE/FALSE/FAIL
+ *
+ * Programmer: Neil Fortner
+ * 3 August 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, H5D_io_op_type_t op_type)
+{
+ const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */
+ htri_t ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(io_info);
+ HDassert(dataset);
+ HDassert(op_type == H5D_IO_OP_READ || op_type == H5D_IO_OP_WRITE);
+
+ /* Don't use selection I/O if it's globally disabled, if there is a type
+ * conversion, or if it's not a contiguous dataset, or if the sieve buffer
+ * exists (write) or is dirty (read) */
+ if (!H5_use_selection_io_g || io_info->io_ops.single_read != H5D__select_read ||
+ io_info->layout_ops.readvv != H5D__contig_readvv ||
+ (op_type == H5D_IO_OP_READ && io_info->dset->shared->cache.contig.sieve_dirty) ||
+ (op_type == H5D_IO_OP_WRITE && io_info->dset->shared->cache.contig.sieve_buf))
+ ret_value = FALSE;
+ else {
+ hbool_t page_buf_enabled;
+
+ HDassert(io_info->io_ops.single_write == H5D__select_write);
+ HDassert(io_info->layout_ops.writevv == H5D__contig_writevv);
+
+ /* Check if the page buffer is enabled */
+ if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled")
+ if (page_buf_enabled)
+ ret_value = FALSE;
+ else
+ ret_value = TRUE;
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__contig_may_use_select_io() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__contig_read
*
* Purpose: Read from a contiguous dataset.
@@ -587,7 +658,7 @@ herr_t
H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space,
H5S_t *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *fm)
{
- herr_t ret_value = SUCCEED; /*return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -598,8 +669,20 @@ H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize
HDassert(mem_space);
HDassert(file_space);
- /* Read data */
- if ((io_info->io_ops.single_read)(io_info, type_info, nelmts, file_space, mem_space) < 0)
+ if (io_info->use_select_io) {
+ size_t dst_type_size = type_info->dst_type_size;
+
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (H5F_shared_select_read(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, nelmts > 0 ? 1 : 0,
+ &mem_space, &file_space, &(io_info->store->contig.dset_addr),
+ &dst_type_size, &(io_info->u.rbuf)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "contiguous selection read failed")
+ } /* end if */
+ else
+ /* Read data through legacy (non-selection I/O) pathway */
+ if ((io_info->io_ops.single_read)(io_info, type_info, nelmts, file_space, mem_space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "contiguous read failed")
done:
@@ -622,7 +705,7 @@ herr_t
H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space,
H5S_t *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *fm)
{
- herr_t ret_value = SUCCEED; /*return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -633,8 +716,20 @@ H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsiz
HDassert(mem_space);
HDassert(file_space);
- /* Write data */
- if ((io_info->io_ops.single_write)(io_info, type_info, nelmts, file_space, mem_space) < 0)
+ if (io_info->use_select_io) {
+ size_t dst_type_size = type_info->dst_type_size;
+
+ /* Issue selection I/O call (we can skip the page buffer because we've
+ * already verified it won't be used, and the metadata accumulator
+ * because this is raw data) */
+ if (H5F_shared_select_write(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, nelmts > 0 ? 1 : 0,
+ &mem_space, &file_space, &(io_info->store->contig.dset_addr),
+ &dst_type_size, &(io_info->u.wbuf)) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "contiguous selection write failed")
+ } /* end if */
+ else
+ /* Write data through legacy (non-selection I/O) pathway */
+ if ((io_info->io_ops.single_write)(io_info, type_info, nelmts, file_space, mem_space) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "contiguous write failed")
done:
diff --git a/src/H5Dearray.c b/src/H5Dearray.c
index abce233..cd52b66 100644
--- a/src/H5Dearray.c
+++ b/src/H5Dearray.c
@@ -417,7 +417,7 @@ H5D__earray_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%" PRIuHSIZE ":", idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%" PRIuHSIZE ":", idx);
HDfprintf(stream, "%*s%-*s %" PRIuHADDR "\n", indent, "", fwidth, temp_str, *(const haddr_t *)elmt);
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -573,7 +573,7 @@ H5D__earray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%" PRIuHSIZE ":", idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%" PRIuHSIZE ":", idx);
HDfprintf(stream, "%*s%-*s {%" PRIuHADDR ", %u, %0x}\n", indent, "", fwidth, temp_str, elmt->addr,
elmt->nbytes, elmt->filter_mask);
diff --git a/src/H5Defl.c b/src/H5Defl.c
index a30955b..b22c6de 100644
--- a/src/H5Defl.c
+++ b/src/H5Defl.c
@@ -60,9 +60,9 @@ typedef struct H5D_efl_writevv_ud_t {
/********************/
/* Layout operation callbacks */
-static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset);
-static herr_t H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
- H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
+static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset);
+static herr_t H5D__efl_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts,
+ H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm);
static ssize_t H5D__efl_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq,
size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq,
size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]);
@@ -209,7 +209,7 @@ H5D__efl_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage)
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
+H5D__efl_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info,
hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space,
H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm)
{
diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c
index 0741e8f..ab0f0f8 100644
--- a/src/H5Dfarray.c
+++ b/src/H5Dfarray.c
@@ -415,7 +415,7 @@ H5D__farray_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%" PRIuHSIZE ":", idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%" PRIuHSIZE ":", idx);
HDfprintf(stream, "%*s%-*s %" PRIuHADDR "\n", indent, "", fwidth, temp_str, *(const haddr_t *)elmt);
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -675,7 +675,7 @@ H5D__farray_filt_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%" PRIuHSIZE ":", idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%" PRIuHSIZE ":", idx);
HDfprintf(stream, "%*s%-*s {%" PRIuHADDR ", %u, %0x}\n", indent, "", fwidth, temp_str, elmt->addr,
elmt->nbytes, elmt->filter_mask);
diff --git a/src/H5Dint.c b/src/H5Dint.c
index c9ea6bd..cc17265 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -378,40 +378,18 @@ H5D__get_space_status(const H5D_t *dset, H5D_space_status_t *allocation)
/* Check for chunked layout */
if (dset->shared->layout.type == H5D_CHUNKED) {
- hsize_t space_allocated; /* The number of bytes allocated for chunks */
- hssize_t snelmts; /* Temporary holder for number of elements in dataspace */
- hsize_t nelmts; /* Number of elements in dataspace */
- size_t dt_size; /* Size of datatype */
- hsize_t full_size; /* The number of bytes in the dataset when fully populated */
-
- /* For chunked layout set the space status by the storage size */
- /* Get the dataset's dataspace */
- HDassert(dset->shared->space);
-
- /* Get the total number of elements in dataset's dataspace */
- if ((snelmts = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve number of elements in dataspace")
- nelmts = (hsize_t)snelmts;
-
- /* Get the size of the dataset's datatype */
- if (0 == (dt_size = H5T_GET_SIZE(dset->shared->type)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to retrieve size of datatype")
-
- /* Compute the maximum size of the dataset in bytes */
- full_size = nelmts * dt_size;
-
- /* Check for overflow during multiplication */
- if (nelmts != (full_size / dt_size))
- HGOTO_ERROR(H5E_DATASET, H5E_OVERFLOW, FAIL, "size of dataset's storage overflowed")
-
- /* Difficult to error check, since the error value is 0 and 0 is a valid value... :-/ */
- if (H5D__get_storage_size(dset, &space_allocated) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get size of dataset's storage")
-
- /* Decide on how much of the space is allocated */
- if (space_allocated == 0)
+ hsize_t n_chunks_total = dset->shared->layout.u.chunk.nchunks;
+ hsize_t n_chunks_alloc = 0;
+
+ if (H5D__get_num_chunks(dset, dset->shared->space, &n_chunks_alloc) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "unable to retrieve number of allocated chunks in dataset")
+
+ HDassert(n_chunks_alloc <= n_chunks_total);
+
+ if (n_chunks_alloc == 0)
*allocation = H5D_SPACE_STATUS_NOT_ALLOCATED;
- else if (space_allocated == full_size)
+ else if (n_chunks_alloc == n_chunks_total)
*allocation = H5D_SPACE_STATUS_ALLOCATED;
else
*allocation = H5D_SPACE_STATUS_PART_ALLOCATED;
@@ -1301,10 +1279,19 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id, hid_t
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set latest indexing")
} /* end if */
- /* Check if this dataset is going into a parallel file and set space allocation time */
+ /* Check if the file driver would like to force early space allocation */
if (H5F_HAS_FEATURE(file, H5FD_FEAT_ALLOCATE_EARLY))
new_dset->shared->dcpl_cache.fill.alloc_time = H5D_ALLOC_TIME_EARLY;
+ /*
+ * Check if this dataset is going into a parallel file and set space allocation time.
+ * If the dataset has filters applied to it, writes to the dataset must be collective,
+ * so we don't need to force early space allocation. Otherwise, we force early space
+ * allocation to facilitate independent raw data operations.
+ */
+ if (H5F_HAS_FEATURE(file, H5FD_FEAT_HAS_MPI) && (new_dset->shared->dcpl_cache.pline.nused == 0))
+ new_dset->shared->dcpl_cache.fill.alloc_time = H5D_ALLOC_TIME_EARLY;
+
/* Set the dataset's I/O operations */
if (H5D__layout_set_io_ops(new_dset) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize I/O operations")
diff --git a/src/H5Dio.c b/src/H5Dio.c
index 1a71ce2..cb61b71 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -165,7 +165,7 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, H5S_t *mem_space, H5S_t *file_space
* difficulties with the notion.
*
* To solve this, we check to see if H5S_select_shape_same() returns true,
- * and if the ranks of the mem and file spaces are different. If the are,
+ * and if the ranks of the mem and file spaces are different. If they are,
* construct a new mem space that is equivalent to the old mem space, and
* use that instead.
*
@@ -300,6 +300,7 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, H5S_t *mem_space, H5S_t *file_spac
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_type_info_t type_info; /* Datatype info for operation */
hbool_t type_info_init = FALSE; /* Whether the datatype info has been initialized */
+ hbool_t should_alloc_space = FALSE; /* Whether or not to initialize dataset's storage */
H5S_t * projected_mem_space = NULL; /* If not NULL, ptr to dataspace containing a */
/* projection of the supplied mem_space to a new */
/* dataspace with rank equal to that of */
@@ -432,8 +433,20 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, H5S_t *mem_space, H5S_t *file_spac
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to set up I/O operation")
/* Allocate dataspace and initialize it if it hasn't been. */
- if (nelmts > 0 && dataset->shared->dcpl_cache.efl.nused == 0 &&
- !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage)) {
+ should_alloc_space = dataset->shared->dcpl_cache.efl.nused == 0 &&
+ !(*dataset->shared->layout.ops->is_space_alloc)(&dataset->shared->layout.storage);
+
+ /*
+ * If not using an MPI-based VFD, we only need to allocate
+ * and initialize storage if there's a selection in the
+ * dataset's dataspace. Otherwise, we always need to participate
+ * in the storage allocation since this may use collective
+ * operations and we will hang if we don't participate.
+ */
+ if (!H5F_HAS_FEATURE(dataset->oloc.file, H5FD_FEAT_HAS_MPI))
+ should_alloc_space = should_alloc_space && (nelmts > 0);
+
+ if (should_alloc_space) {
hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */
hbool_t full_overwrite; /* Whether we are over-writing all the elements */
@@ -563,6 +576,10 @@ H5D__ioinfo_init(H5D_t *dset, const H5D_type_info_t *type_info, H5D_storage_t *s
io_info->io_ops.single_write = H5D__scatgath_write;
} /* end else */
+ /* Start with selection I/O off, layout callback will turn it on if
+ * appropriate */
+ io_info->use_select_io = FALSE;
+
#ifdef H5_HAVE_PARALLEL
/* Determine if the file was opened with an MPI VFD */
io_info->using_mpi_vfd = H5F_HAS_FEATURE(dset->oloc.file, H5FD_FEAT_HAS_MPI);
@@ -801,105 +818,47 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, const H5S_t *file_
/* Check if we can use the optimized parallel I/O routines */
if (opt == TRUE) {
- /* Override the I/O op pointers to the MPI-specific routines */
- io_info->io_ops.multi_read = dset->shared->layout.ops->par_read;
- io_info->io_ops.multi_write = dset->shared->layout.ops->par_write;
- io_info->io_ops.single_read = H5D__mpio_select_read;
- io_info->io_ops.single_write = H5D__mpio_select_write;
- } /* end if */
+ /* Override the I/O op pointers to the MPI-specific routines, unless
+ * selection I/O is to be used - in this case the file driver will
+ * handle collective I/O */
+ /* Check for selection/vector support in file driver? -NAF */
+ if (!io_info->use_select_io) {
+ io_info->io_ops.multi_read = dset->shared->layout.ops->par_read;
+ io_info->io_ops.multi_write = dset->shared->layout.ops->par_write;
+ io_info->io_ops.single_read = H5D__mpio_select_read;
+ io_info->io_ops.single_write = H5D__mpio_select_write;
+ } /* end if */
+ } /* end if */
else {
- int comm_size = 0;
-
- /* Retrieve size of MPI communicator used for file */
- if ((comm_size = H5F_shared_mpi_get_size(io_info->f_sh)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MPI communicator size")
-
/* Check if there are any filters in the pipeline. If there are,
* we cannot break to independent I/O if this is a write operation
* with multiple ranks involved; otherwise, there will be metadata
* inconsistencies in the file.
*/
- if (comm_size > 1 && io_info->op_type == H5D_IO_OP_WRITE &&
- io_info->dset->shared->dcpl_cache.pline.nused > 0) {
- H5D_mpio_no_collective_cause_t cause;
- uint32_t local_no_collective_cause;
- uint32_t global_no_collective_cause;
- hbool_t local_error_message_previously_written = FALSE;
- hbool_t global_error_message_previously_written = FALSE;
- size_t idx;
- size_t cause_strings_len;
- char local_no_collective_cause_string[512] = "";
- char global_no_collective_cause_string[512] = "";
- const char * cause_strings[] = {
- "independent I/O was requested",
- "datatype conversions were required",
- "data transforms needed to be applied",
- "optimized MPI types flag wasn't set",
- "one of the dataspaces was neither simple nor scalar",
- "dataset was not contiguous or chunked",
- "parallel writes to filtered datasets are disabled",
- "an error occurred while checking if collective I/O was possible"};
-
- cause_strings_len = sizeof(cause_strings) / sizeof(cause_strings[0]);
-
- if (H5CX_get_mpio_local_no_coll_cause(&local_no_collective_cause) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "unable to get local no collective cause value")
- if (H5CX_get_mpio_global_no_coll_cause(&global_no_collective_cause) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
- "unable to get global no collective cause value")
-
- /* Append each of the "reason for breaking collective I/O" error messages to the
- * local and global no collective cause strings */
- for (cause = 1, idx = 0;
- (cause < H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE) && (idx < cause_strings_len);
- cause <<= 1, idx++) {
- if (cause & local_no_collective_cause) {
- size_t local_buffer_space = sizeof(local_no_collective_cause_string) -
- HDstrlen(local_no_collective_cause_string) - 1;
-
- /* Check if there were any previous error messages included. If so, prepend a
- * semicolon to separate the messages.
- */
- if (local_buffer_space && local_error_message_previously_written) {
- HDstrncat(local_no_collective_cause_string, "; ", local_buffer_space);
- local_buffer_space -= MIN(local_buffer_space, 2);
- }
-
- if (local_buffer_space)
- HDstrncat(local_no_collective_cause_string, cause_strings[idx],
- local_buffer_space);
-
- local_error_message_previously_written = TRUE;
- } /* end if */
-
- if (cause & global_no_collective_cause) {
- size_t global_buffer_space = sizeof(global_no_collective_cause_string) -
- HDstrlen(global_no_collective_cause_string) - 1;
-
- /* Check if there were any previous error messages included. If so, prepend a
- * semicolon to separate the messages.
- */
- if (global_buffer_space && global_error_message_previously_written) {
- HDstrncat(global_no_collective_cause_string, "; ", global_buffer_space);
- global_buffer_space -= MIN(global_buffer_space, 2);
- }
-
- if (global_buffer_space)
- HDstrncat(global_no_collective_cause_string, cause_strings[idx],
- global_buffer_space);
-
- global_error_message_previously_written = TRUE;
- } /* end if */
- } /* end for */
-
- HGOTO_ERROR(H5E_IO, H5E_NO_INDEPENDENT, FAIL,
- "Can't perform independent write with filters in pipeline.\n"
- " The following caused a break from collective I/O:\n"
- " Local causes: %s\n"
- " Global causes: %s",
- local_no_collective_cause_string, global_no_collective_cause_string);
- } /* end if */
+ if (io_info->op_type == H5D_IO_OP_WRITE && io_info->dset->shared->dcpl_cache.pline.nused > 0) {
+ int comm_size = 0;
+
+ /* Retrieve size of MPI communicator used for file */
+ if ((comm_size = H5F_shared_mpi_get_size(io_info->f_sh)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MPI communicator size")
+
+ if (comm_size > 1) {
+ char local_no_coll_cause_string[512];
+ char global_no_coll_cause_string[512];
+
+ if (H5D__mpio_get_no_coll_cause_strings(local_no_coll_cause_string, 512,
+ global_no_coll_cause_string, 512) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "can't get reasons for breaking collective I/O")
+
+ HGOTO_ERROR(H5E_IO, H5E_NO_INDEPENDENT, FAIL,
+ "Can't perform independent write with filters in pipeline.\n"
+ " The following caused a break from collective I/O:\n"
+ " Local causes: %s\n"
+ " Global causes: %s",
+ local_no_coll_cause_string, global_no_coll_cause_string);
+ }
+ }
/* If we won't be doing collective I/O, but the user asked for
* collective I/O, change the request to use independent I/O
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c
index 6c4fc12..6fdec05 100644
--- a/src/H5Dlayout.c
+++ b/src/H5Dlayout.c
@@ -213,7 +213,7 @@ H5D__layout_meta_size(const H5F_t *f, const H5O_layout_t *layout, hbool_t includ
ret_value++;
/* Dimension sizes */
- ret_value += layout->u.chunk.ndims * layout->u.chunk.enc_bytes_per_dim;
+ ret_value += layout->u.chunk.ndims * (size_t)layout->u.chunk.enc_bytes_per_dim;
/* Type of chunk index */
ret_value++;
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index f8bce33..2cde4d3 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -36,6 +36,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File access */
#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5Oprivate.h" /* Object headers */
@@ -43,6 +44,15 @@
#include "H5Sprivate.h" /* Dataspaces */
#include "H5VMprivate.h" /* Vector */
+/* uthash is an external, header-only hash table implementation.
+ *
+ * We include the file directly in src/ and #define a few functions
+ * to use our internal memory calls.
+ */
+#define uthash_malloc(sz) H5MM_malloc(sz)
+#define uthash_free(ptr, sz) H5MM_free(ptr) /* Ignoring sz is intentional */
+#include "uthash.h"
+
#ifdef H5_HAVE_PARALLEL
/****************/
@@ -81,9 +91,54 @@
/* Macros to represent the regularity of the selection for multiple chunk IO case. */
#define H5D_CHUNK_SELECT_REG 1
+/*
+ * Threshold value for redistributing shared filtered chunks
+ * on all MPI ranks, or just MPI rank 0
+ */
+#define H5D_CHUNK_REDISTRIBUTE_THRES ((size_t)((25 * H5_MB) / sizeof(H5D_chunk_redistribute_info_t)))
+
+/*
+ * Initial allocation size for the arrays that hold
+ * buffers for chunk modification data that is sent
+ * to other ranks and the MPI_Request objects for
+ * those send operations
+ */
+#define H5D_CHUNK_NUM_SEND_MSGS_INIT 64
+
+/*
+ * Define a tag value for the MPI messages sent/received for
+ * chunk modification data
+ */
+#define H5D_CHUNK_MOD_DATA_TAG 64
+
+/*
+ * Macro to initialize a H5D_chk_idx_info_t
+ * structure, given a pointer to a H5D_io_info_t
+ * structure
+ */
+#define H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, io_info_ptr) \
+ do { \
+ index_info.f = (io_info_ptr)->dset->oloc.file; \
+ index_info.pline = &((io_info_ptr)->dset->shared->dcpl_cache.pline); \
+ index_info.layout = &((io_info_ptr)->dset->shared->layout.u.chunk); \
+ index_info.storage = &((io_info_ptr)->dset->shared->layout.storage.u.chunk); \
+ } while (0)
+
+/*
+ * Macro to initialize a H5D_chunk_ud_t structure
+ * given a pointer to a H5D_chk_idx_info_t structure
+ */
+#define H5D_MPIO_INIT_CHUNK_UD_INFO(chunk_ud, index_info_ptr) \
+ do { \
+ HDmemset(&chunk_ud, 0, sizeof(H5D_chunk_ud_t)); \
+ chunk_ud.common.layout = (index_info_ptr)->layout; \
+ chunk_ud.common.storage = (index_info_ptr)->storage; \
+ } while (0)
+
/******************/
/* Local Typedefs */
/******************/
+
/* Combine chunk address and chunk info into a struct for better performance. */
typedef struct H5D_chunk_addr_info_t {
haddr_t chunk_addr;
@@ -100,115 +155,137 @@ typedef enum H5D_mpio_no_rank0_bcast_cause_t {
} H5D_mpio_no_rank0_bcast_cause_t;
/*
+ * Information necessary for re-allocating file space for a chunk
+ * during a parallel write of a chunked dataset with filters
+ * applied.
+ */
+typedef struct H5D_chunk_alloc_info_t {
+ H5F_block_t chunk_current;
+ H5F_block_t chunk_new;
+ hsize_t chunk_idx;
+} H5D_chunk_alloc_info_t;
+
+/*
+ * Information for a chunk pertaining to the dataset's chunk
+ * index entry for the chunk
+ */
+typedef struct H5D_chunk_index_info_t {
+ hsize_t chunk_idx;
+ unsigned filter_mask;
+ hbool_t need_insert;
+} H5D_chunk_index_info_t;
+
+/*
* Information about a single chunk when performing collective filtered I/O. All
* of the fields of one of these structs are initialized at the start of collective
- * filtered I/O in the function H5D__construct_filtered_io_info_list().
- *
- * This struct's fields are as follows:
- *
- * index - The "Index" of the chunk in the dataset. The index of a chunk is used during
- * the collective re-insertion of chunks into the chunk index after the collective
- * I/O has been performed.
+ * filtered I/O in the function H5D__mpio_collective_filtered_chunk_io_setup(). This
+ * struct's fields are as follows:
*
- * scaled - The scaled coordinates of the chunk in the dataset's file dataspace. The
- * coordinates are used in both the collective re-allocation of space in the file
- * and the collective re-insertion of chunks into the chunk index after the collective
- * I/O has been performed.
+ * index_info - A structure containing the information needed when collectively
+ * re-inserting the chunk into the dataset's chunk index. The structure
+ * is distributed to all ranks during the re-insertion operation. Its fields
+ * are as follows:
*
- * full_overwrite - A flag which determines whether or not a chunk needs to be read from the
- * file when being updated. If a chunk is being fully overwritten (the entire
- * extent is selected in its file dataspace), then it is not necessary to
- * read the chunk from the file. However, if the chunk is not being fully
- * overwritten, it has to be read from the file in order to update the chunk
- * without trashing the parts of the chunk that are not selected.
+ * chunk_idx - The index of the chunk in the dataset's chunk index.
*
- * num_writers - The total number of processors writing to this chunk. This field is used
- * when the new owner of a chunk is receiving messages, which contain selections in
- * the chunk and data to update the chunk with, from other processors which have this
- * chunk selected in the I/O operation. The new owner must know how many processors it
- * should expect messages from so that it can post an equal number of receive calls.
+ * filter_mask - A bit-mask that indicates which filters are to be applied to the
+ * chunk. Each filter in a chunk's filter pipeline has a bit position
+ * that can be masked to disable that particular filter for the chunk.
+ * This filter mask is saved alongside the chunk in the file.
*
- * io_size - The total size of I/O to this chunk. This field is an accumulation of the size of
- * I/O to the chunk from each processor which has the chunk selected and is used to
- * determine the value for the previous full_overwrite flag.
+ * need_insert - A flag which determines whether or not a chunk needs to be re-inserted into
+ * the chunk index after the write operation.
*
- * buf - A pointer which serves the dual purpose of holding either the chunk data which is to be
- * written to the file or the chunk data which has been read from the file.
+ * chunk_info - A pointer to the chunk's H5D_chunk_info_t structure, which contains useful
+ * information like the dataspaces containing the selection in the chunk.
*
- * chunk_states - In the case of dataset writes only, this struct is used to track a chunk's size and
- * address in the file before and after the filtering operation has occurred.
+ * chunk_current - The address in the file and size of this chunk before the filtering
+ * operation. When reading a chunk from the file, this field is used to
+ * read the correct amount of bytes. It is also used when redistributing
+ * shared chunks among MPI ranks and as a parameter to the chunk file
+ * space reallocation function.
*
- * Its fields are as follows:
+ * chunk_new - The address in the file and size of this chunk after the filtering
+ * operation. This field is relevant when collectively re-allocating space
+ * in the file for all of the chunks written to in the I/O operation, as
+ * their sizes may have changed after their data has been filtered.
*
- * chunk_current - The address in the file and size of this chunk before the filtering
- * operation. When reading a chunk from the file, this field is used to
- * read the correct amount of bytes. It is also used when redistributing
- * shared chunks among processors and as a parameter to the chunk file
- * space reallocation function.
+ * need_read - A flag which determines whether or not a chunk needs to be read from the
+ * file. During writes, if a chunk is being fully overwritten (the entire extent
+ * is selected in its file dataspace), then it is not necessary to read the chunk
+ * from the file. However, if the chunk is not being fully overwritten, it has to
+ * be read from the file in order to update the chunk without trashing the parts
+ * of the chunk that are not selected. During reads, this field should generally
+ * be true, but may be false if the chunk isn't allocated, for example.
*
- * new_chunk - The address in the file and size of this chunk after the filtering
- * operation. This field is relevant when collectively re-allocating space
- * in the file for all of the chunks written to in the I/O operation, as
- * their sizes may have changed after their data has been filtered.
+ * skip_filter_pline - A flag which determines whether to skip calls to the filter pipeline
+ * for this chunk. This flag is mostly useful for correct handling of
+ * partial edge chunks when the "don't filter partial edge chunks" flag
+ * is set on the dataset's DCPL.
*
- * owners - In the case of dataset writes only, this struct is used to manage which single processor
- * will ultimately write data out to the chunk. It allows the other processors to act according
- * to the decision and send their selection in the chunk, as well as the data they wish
- * to update the chunk with, to the processor which is writing to the chunk.
+ * io_size - The total size of I/O to this chunk. This field is an accumulation of the size of
+ *           I/O to the chunk from each MPI rank which has the chunk selected and is used to
+ *           determine the value for the `need_read` flag.
*
- * Its fields are as follows:
+ * chunk_buf_size - The size in bytes of the data buffer allocated for the chunk
*
- * original_owner - The processor which originally had this chunk selected at the beginning of
- * the collective filtered I/O operation. This field is currently used when
- * redistributing shared chunks among processors.
+ * orig_owner - The MPI rank which originally had this chunk selected at the beginning of
+ * the collective filtered I/O operation. This field is currently used when
+ * redistributing shared chunks among MPI ranks.
*
- * new_owner - The processor which has been selected to perform the write to this chunk.
+ * new_owner - The MPI rank which has been selected to perform the modifications to this chunk.
*
- * async_info - In the case of dataset writes only, this struct is used by the owning processor of the
- * chunk in order to manage the MPI send and receive calls made between it and all of
- * the other processors which have this chunk selected in the I/O operation.
+ * num_writers - The total number of MPI ranks writing to this chunk. This field is used when
+ * the new owner of a chunk is receiving messages from other MPI ranks that
+ * contain their selections in the chunk and the data to update the chunk with.
+ * The new owner must know how many MPI ranks it should expect messages from so
+ * that it can post an equal number of receive calls.
*
- * Its fields are as follows:
+ * buf - A pointer which serves the dual purpose of holding either the chunk data which is to be
+ * written to the file or the chunk data which has been read from the file.
*
- * receive_requests_array - An array containing one MPI_Request for each of the
- * asynchronous MPI receive calls the owning processor of this
- * chunk makes to another processor in order to receive that
- * processor's chunk modification data and selection in the chunk.
+ * hh - A handle for hash tables provided by the uthash.h header
*
- * receive_buffer_array - An array of buffers into which the owning processor of this chunk
- * will store chunk modification data and the selection in the chunk
- * received from another processor.
- *
- * num_receive_requests - The number of entries in the receive_request_array and
- * receive_buffer_array fields.
*/
typedef struct H5D_filtered_collective_io_info_t {
- hsize_t index;
- hsize_t scaled[H5O_LAYOUT_NDIMS];
- hbool_t full_overwrite;
- size_t num_writers;
- size_t io_size;
- void * buf;
-
- struct {
- H5F_block_t chunk_current;
- H5F_block_t new_chunk;
- } chunk_states;
-
- struct {
- int original_owner;
- int new_owner;
- } owners;
-
- struct {
- MPI_Request * receive_requests_array;
- unsigned char **receive_buffer_array;
- int num_receive_requests;
- } async_info;
+ H5D_chunk_index_info_t index_info;
+
+ H5D_chunk_info_t *chunk_info;
+ H5F_block_t chunk_current;
+ H5F_block_t chunk_new;
+ hbool_t need_read;
+ hbool_t skip_filter_pline;
+ size_t io_size;
+ size_t chunk_buf_size;
+ int orig_owner;
+ int new_owner;
+ int num_writers;
+ void * buf;
+
+ UT_hash_handle hh;
} H5D_filtered_collective_io_info_t;
-/* Function pointer typedef for sort function */
-typedef int (*H5D_mpio_sort_func_cb_t)(const void *, const void *);
+/*
+ * Information necessary for redistributing shared chunks during
+ * a parallel write of a chunked dataset with filters applied.
+ */
+typedef struct H5D_chunk_redistribute_info_t {
+ H5F_block_t chunk_block;
+ hsize_t chunk_idx;
+ int orig_owner;
+ int new_owner;
+ int num_writers;
+} H5D_chunk_redistribute_info_t;
+
+/*
+ * Information used when re-inserting a chunk into a dataset's
+ * chunk index during a parallel write of a chunked dataset with
+ * filters applied.
+ */
+typedef struct H5D_chunk_insert_info_t {
+ H5F_block_t chunk_block;
+ H5D_chunk_index_info_t index_info;
+} H5D_chunk_insert_info_t;
/********************/
/* Local Prototypes */
@@ -216,53 +293,98 @@ typedef int (*H5D_mpio_sort_func_cb_t)(const void *, const void *);
static herr_t H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
H5D_chunk_map_t *fm);
static herr_t H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm);
+ H5D_chunk_map_t *fm, int mpi_rank, int mpi_size);
static herr_t H5D__multi_chunk_filtered_collective_io(H5D_io_info_t * io_info,
- const H5D_type_info_t *type_info, H5D_chunk_map_t *fm);
+ const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
+ int mpi_rank, int mpi_size);
static herr_t H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm, int sum_chunk);
+ H5D_chunk_map_t *fm, int sum_chunk, int mpi_rank, int mpi_size);
static herr_t H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm);
+ H5D_chunk_map_t *fm, int mpi_rank, int mpi_size);
static herr_t H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
const H5S_t *file_space, const H5S_t *mem_space);
static herr_t H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hsize_t nelmts, MPI_Datatype mpi_file_type, MPI_Datatype mpi_buf_type);
static herr_t H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
- H5D_chunk_addr_info_t chunk_addr_info_array[], int many_chunk_opt);
+ H5D_chunk_addr_info_t chunk_addr_info_array[], int many_chunk_opt, int mpi_rank,
+ int mpi_size);
static herr_t H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, uint8_t assign_io_mode[],
- haddr_t chunk_addr[]);
+ haddr_t chunk_addr[], int mpi_rank, int mpi_size);
static herr_t H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
int *sum_chunkf);
-static herr_t H5D__construct_filtered_io_info_list(const H5D_io_info_t * io_info,
- const H5D_type_info_t * type_info,
- const H5D_chunk_map_t * fm,
- H5D_filtered_collective_io_info_t **chunk_list,
- size_t * num_entries);
-#if MPI_VERSION >= 3
-static herr_t H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t * io_info,
- const H5D_type_info_t * type_info,
- const H5D_chunk_map_t * fm,
- H5D_filtered_collective_io_info_t *local_chunk_array,
- size_t *local_chunk_array_num_entries);
-#endif
-static herr_t H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries,
- size_t array_entry_size, void **gathered_array,
- size_t *gathered_array_num_entries, hbool_t allgather, int root,
- MPI_Comm comm, int (*sort_func)(const void *, const void *));
-static herr_t H5D__mpio_filtered_collective_write_type(H5D_filtered_collective_io_info_t *chunk_list,
- size_t num_entries, MPI_Datatype *new_mem_type,
- hbool_t *mem_type_derived, MPI_Datatype *new_file_type,
- hbool_t *file_type_derived);
-static herr_t H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk_entry,
- const H5D_io_info_t * io_info,
- const H5D_type_info_t * type_info,
- const H5D_chunk_map_t * fm);
+static herr_t H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t * io_info,
+ const H5D_type_info_t * type_info,
+ const H5D_chunk_map_t * fm,
+ H5D_filtered_collective_io_info_t **chunk_list,
+ size_t *num_entries, int mpi_rank);
+static herr_t H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
+ int mpi_rank, int mpi_size,
+ size_t **rank_chunks_assigned_map);
+static herr_t H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t * num_chunks_assigned_map,
+ hbool_t all_ranks_involved,
+ const H5D_io_info_t * io_info,
+ const H5D_chunk_map_t *fm, int mpi_rank, int mpi_size);
+static herr_t H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t *chunk_list_num_entries, H5D_io_info_t *io_info,
+ const H5D_type_info_t *type_info, int mpi_rank,
+ int mpi_size,
+ H5D_filtered_collective_io_info_t **chunk_hash_table,
+ unsigned char *** chunk_msg_bufs,
+ int * chunk_msg_bufs_len);
+static herr_t H5D__mpio_collective_filtered_chunk_common_io(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ const H5D_io_info_t * io_info,
+ const H5D_type_info_t *type_info, int mpi_size);
+static herr_t H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ const H5D_io_info_t * io_info,
+ const H5D_type_info_t *type_info, int mpi_rank,
+ int mpi_size);
+static herr_t H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ H5D_filtered_collective_io_info_t *chunk_hash_table,
+ unsigned char ** chunk_msg_bufs,
+ int chunk_msg_bufs_len, const H5D_io_info_t *io_info,
+ const H5D_type_info_t *type_info, int mpi_rank,
+ int mpi_size);
+static herr_t H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ size_t * num_chunks_assigned_map,
+ H5D_io_info_t * io_info,
+ H5D_chk_idx_info_t *idx_info, int mpi_rank,
+ int mpi_size);
+static herr_t H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ size_t * num_chunks_assigned_map,
+ H5D_io_info_t * io_info,
+ H5D_chk_idx_info_t *idx_info, int mpi_rank,
+ int mpi_size);
+static herr_t H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type,
+ hbool_t * contig_type_derived,
+ MPI_Datatype *resized_type,
+ hbool_t * resized_type_derived);
+static herr_t H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
+ MPI_Datatype *resized_type, hbool_t *resized_type_derived);
+static herr_t H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
+ MPI_Datatype *resized_type,
+ hbool_t * resized_type_derived);
+static herr_t H5D__mpio_collective_filtered_io_type(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t num_entries, H5D_io_op_type_t op_type,
+ MPI_Datatype *new_mem_type, hbool_t *mem_type_derived,
+ MPI_Datatype *new_file_type, hbool_t *file_type_derived);
static int H5D__cmp_chunk_addr(const void *chunk_addr_info1, const void *chunk_addr_info2);
static int H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_info_entry1,
const void *filtered_collective_io_info_entry2);
-#if MPI_VERSION >= 3
-static int H5D__cmp_filtered_collective_io_info_entry_owner(const void *filtered_collective_io_info_entry1,
- const void *filtered_collective_io_info_entry2);
+static int H5D__cmp_chunk_redistribute_info(const void *entry1, const void *entry2);
+static int H5D__cmp_chunk_redistribute_info_orig_owner(const void *entry1, const void *entry2);
+
+#ifdef H5Dmpio_DEBUG
+static herr_t H5D__mpio_debug_init(void);
+static herr_t H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries, int mpi_rank);
#endif
/*********************/
@@ -273,6 +395,188 @@ static int H5D__cmp_filtered_collective_io_info_entry_owner(const void *filtered
/* Local Variables */
/*******************/
+/* Declare extern free list to manage the H5S_sel_iter_t struct */
+H5FL_EXTERN(H5S_sel_iter_t);
+
+#ifdef H5Dmpio_DEBUG
+
+/* Flags to control debug actions in this file.
+ * (Meant to be indexed by characters)
+ *
+ * These flags can be set with either (or both) the environment variable
+ * "H5D_mpio_Debug" set to a string containing one or more characters
+ * (flags) or by setting them as a string value for the
+ * "H5D_mpio_debug_key" MPI Info key.
+ *
+ * Supported characters in 'H5D_mpio_Debug' string:
+ * 't' trace function entry and exit
+ * 'f' log to file rather than debugging stream
+ * 'm' show (rough) memory usage statistics
+ * 'c' show critical timing information
+ *
+ * To only show output from a particular MPI rank, specify its rank
+ * number as a character, e.g.:
+ *
+ * '0' only show output from rank 0
+ *
+ * To only show output from a particular range (up to 8 ranks supported
+ * between 0-9) of MPI ranks, specify the start and end ranks separated
+ * by a hyphen, e.g.:
+ *
+ * '0-7' only show output from ranks 0 through 7
+ *
+ */
+static int H5D_mpio_debug_flags_s[256];
+static int H5D_mpio_debug_rank_s[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static hbool_t H5D_mpio_debug_inited = FALSE;
+static const char *const trace_in_pre = "-> ";
+static const char *const trace_out_pre = "<- ";
+static int debug_indent = 0;
+static FILE * debug_stream = NULL;
+
+/* Determine if this rank should output debugging info */
+#define H5D_MPIO_DEBUG_THIS_RANK(rank) \
+ (H5D_mpio_debug_rank_s[0] < 0 || rank == H5D_mpio_debug_rank_s[0] || rank == H5D_mpio_debug_rank_s[1] || \
+ rank == H5D_mpio_debug_rank_s[2] || rank == H5D_mpio_debug_rank_s[3] || \
+ rank == H5D_mpio_debug_rank_s[4] || rank == H5D_mpio_debug_rank_s[5] || \
+ rank == H5D_mpio_debug_rank_s[6] || rank == H5D_mpio_debug_rank_s[7])
+
+/* Print some debugging string */
+#define H5D_MPIO_DEBUG(rank, string) \
+ do { \
+ if (debug_stream && H5D_MPIO_DEBUG_THIS_RANK(rank)) { \
+ HDfprintf(debug_stream, "%*s(Rank %d) " string "\n", debug_indent, "", rank); \
+ HDfflush(debug_stream); \
+ } \
+ } while (0)
+
+/* Print some debugging string with printf-style arguments */
+#define H5D_MPIO_DEBUG_VA(rank, string, ...) \
+ do { \
+ if (debug_stream && H5D_MPIO_DEBUG_THIS_RANK(rank)) { \
+ HDfprintf(debug_stream, "%*s(Rank %d) " string "\n", debug_indent, "", rank, __VA_ARGS__); \
+ HDfflush(debug_stream); \
+ } \
+ } while (0)
+
+#define H5D_MPIO_TRACE_ENTER(rank) \
+ do { \
+ hbool_t trace_flag = H5D_mpio_debug_flags_s[(int)'t']; \
+ \
+ if (trace_flag) { \
+ H5D_MPIO_DEBUG_VA(rank, "%s%s", trace_in_pre, __func__); \
+ debug_indent += (int)HDstrlen(trace_in_pre); \
+ } \
+ } while (0)
+
+#define H5D_MPIO_TRACE_EXIT(rank) \
+ do { \
+ hbool_t trace_flag = H5D_mpio_debug_flags_s[(int)'t']; \
+ \
+ if (trace_flag) { \
+ debug_indent -= (int)HDstrlen(trace_out_pre); \
+ H5D_MPIO_DEBUG_VA(rank, "%s%s", trace_out_pre, __func__); \
+ } \
+ } while (0)
+
+/* Start a timed region. NOTE: this macro deliberately opens a scope ('{')
+ * that only H5D_MPIO_TIME_STOP closes, so every H5D_MPIO_TIME_START must be
+ * paired with a matching H5D_MPIO_TIME_STOP at the same block level; the
+ * scope also keeps 'time_flag'/'start_time'/'end_time'/'op' local to the
+ * timed region. Timing output is only produced when the 'c' debug flag is
+ * set in the "H5D_mpio_Debug" string. */
+#define H5D_MPIO_TIME_START(rank, op_name)                                                                   \
+    {                                                                                                        \
+        hbool_t           time_flag  = H5D_mpio_debug_flags_s[(int)'c'];                                     \
+        double            start_time = 0.0, end_time = 0.0;                                                  \
+        const char *const op         = op_name;                                                              \
+                                                                                                             \
+        if (time_flag) {                                                                                     \
+            start_time = MPI_Wtime();                                                                        \
+        }
+
+/* End a timed region opened by H5D_MPIO_TIME_STOP's matching
+ * H5D_MPIO_TIME_START; closes the scope that macro opened. */
+#define H5D_MPIO_TIME_STOP(rank)                                                                             \
+    if (time_flag) {                                                                                         \
+        end_time = MPI_Wtime();                                                                              \
+        H5D_MPIO_DEBUG_VA(rank, "'%s' took %f seconds", op, (end_time - start_time));                        \
+    }                                                                                                        \
+    }
+
+/*---------------------------------------------------------------------------
+ * Function:  H5D__mpio_parse_debug_str
+ *
+ * Purpose:   Parse a string for H5Dmpio-related debugging flags.
+ *
+ *            Digits select which MPI ranks produce output: a single digit
+ *            selects one rank, and "<digit>-<digit>" selects a range of up
+ *            to 8 ranks. Any other character increments the corresponding
+ *            entry of the per-character flag table.
+ *
+ * Returns:   N/A
+ *
+ *---------------------------------------------------------------------------
+ */
+static void
+H5D__mpio_parse_debug_str(const char *s)
+{
+    FUNC_ENTER_STATIC_NOERR
+
+    HDassert(s);
+
+    while (*s) {
+        /* Go through 'unsigned char' so bytes > 127 can't become a negative
+         * index into H5D_mpio_debug_flags_s when 'char' is signed (that
+         * would be undefined behavior) */
+        int c = (int)(unsigned char)(*s);
+
+        if (c >= (int)'0' && c <= (int)'9') {
+            hbool_t range = FALSE;
+
+            /* A rank range looks like "<digit>-<digit>"; only probe the
+             * next two characters if they both exist */
+            if (*(s + 1) && *(s + 2))
+                range = (int)*(s + 1) == '-' && (int)*(s + 2) >= (int)'0' && (int)*(s + 2) <= (int)'9';
+
+            if (range) {
+                int start_rank = c - (int)'0';
+                int end_rank   = (int)*(s + 2) - '0';
+                int num_ranks  = end_rank - start_rank + 1;
+                int i;
+
+                /* Only up to 8 ranks are supported; clamp the range */
+                if (num_ranks > 8) {
+                    end_rank   = start_rank + 7;
+                    num_ranks  = 8;
+                }
+
+                for (i = 0; i < num_ranks; i++)
+                    H5D_mpio_debug_rank_s[i] = start_rank++;
+
+                /* Skip the '-' and the end digit; the s++ below consumes
+                 * the start digit (advancing by 3 here would also skip the
+                 * character that follows the range) */
+                s += 2;
+            }
+            else
+                H5D_mpio_debug_rank_s[0] = c - (int)'0';
+        }
+        else
+            H5D_mpio_debug_flags_s[c]++;
+
+        s++;
+    }
+
+    FUNC_LEAVE_NOAPI_VOID
+}
+
+/*---------------------------------------------------------------------------
+ * Function:  H5D__mpio_debug_init
+ *
+ * Purpose:   One-time initialization of H5Dmpio debugging state: clears
+ *            the per-character flag table, parses the "H5D_mpio_Debug"
+ *            environment variable (if set) for flags and rank filters,
+ *            and points debug output at the library's 'D' debug stream
+ *            when that stream is enabled.
+ *
+ * Returns:   SUCCEED (this routine has no failure paths)
+ *
+ *---------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_debug_init(void)
+{
+    const char *debug_str;
+    herr_t      ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Must only be called once (checked, not enforced, in production) */
+    HDassert(!H5D_mpio_debug_inited);
+
+    /* Clear the debug flag buffer */
+    HDmemset(H5D_mpio_debug_flags_s, 0, sizeof(H5D_mpio_debug_flags_s));
+
+    /* Retrieve and parse the H5Dmpio debug string */
+    debug_str = HDgetenv("H5D_mpio_Debug");
+    if (debug_str)
+        H5D__mpio_parse_debug_str(debug_str);
+
+    /* Default debug output to the 'D' package debug stream, if enabled */
+    if (H5DEBUG(D))
+        debug_stream = H5DEBUG(D);
+
+    H5D_mpio_debug_inited = TRUE;
+
+    FUNC_LEAVE_NOAPI(ret_value)
+}
+
+#endif
+
/*-------------------------------------------------------------------------
* Function: H5D__mpio_opt_possible
*
@@ -347,14 +651,9 @@ H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space, co
* use collective IO will defer until each chunk IO is reached.
*/
-#if MPI_VERSION < 3
- /*
- * Don't allow parallel writes to filtered datasets if the MPI version
- * is less than 3. The functions needed (MPI_Mprobe and MPI_Imrecv) will
- * not be available.
- */
- if (io_info->op_type == H5D_IO_OP_WRITE && io_info->dset->shared->layout.type == H5D_CHUNKED &&
- io_info->dset->shared->dcpl_cache.pline.nused > 0)
+#ifndef H5_HAVE_PARALLEL_FILTERED_WRITES
+ /* Don't allow writes to filtered datasets if the functionality is disabled */
+ if (io_info->op_type == H5D_IO_OP_WRITE && io_info->dset->shared->dcpl_cache.pline.nused > 0)
local_cause[0] |= H5D_MPIO_PARALLEL_FILTERED_WRITES_DISABLED;
#endif
@@ -437,6 +736,150 @@ done:
} /* H5D__mpio_opt_possible() */
/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_get_no_coll_cause_strings
+ *
+ * Purpose: When collective I/O is broken internally, it can be useful
+ * for users to see a representative string for the reason(s)
+ * why it was broken. This routine inspects the current
+ * "cause" flags from the API context and prints strings into
+ * the caller's buffers for the local and global reasons that
+ * collective I/O was broken.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len, char *global_cause,
+ size_t global_cause_len)
+{
+ uint32_t local_no_coll_cause;
+ uint32_t global_no_coll_cause;
+ size_t local_cause_bytes_written = 0;
+ size_t global_cause_bytes_written = 0;
+ int nbits;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ HDassert((local_cause && local_cause_len > 0) || (global_cause && global_cause_len > 0));
+
+ /*
+ * Use compile-time assertion so this routine is updated
+ * when any new "no collective cause" values are added
+ */
+ HDcompile_assert(H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE == (H5D_mpio_no_collective_cause_t)256);
+
+ /* Initialize output buffers */
+ if (local_cause)
+ *local_cause = '\0';
+ if (global_cause)
+ *global_cause = '\0';
+
+ /* Retrieve the local and global cause flags from the API context */
+ if (H5CX_get_mpio_local_no_coll_cause(&local_no_coll_cause) < 0)
+ HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get local no collective cause value")
+ if (H5CX_get_mpio_global_no_coll_cause(&global_no_coll_cause) < 0)
+ HGOTO_ERROR(H5E_CONTEXT, H5E_CANTGET, FAIL, "unable to get global no collective cause value")
+
+ /*
+ * Append each of the "reason for breaking collective I/O"
+ * error messages to the local and global cause string buffers
+ */
+ nbits = 8 * sizeof(local_no_coll_cause);
+ for (int bit_pos = 0; bit_pos < nbits; bit_pos++) {
+ H5D_mpio_no_collective_cause_t cur_cause;
+ const char * cause_str;
+ size_t buf_space_left;
+
+ cur_cause = (H5D_mpio_no_collective_cause_t)(1 << bit_pos);
+ if (cur_cause == H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE)
+ break;
+
+ switch (cur_cause) {
+ case H5D_MPIO_SET_INDEPENDENT:
+ cause_str = "independent I/O was requested";
+ break;
+ case H5D_MPIO_DATATYPE_CONVERSION:
+ cause_str = "datatype conversions were required";
+ break;
+ case H5D_MPIO_DATA_TRANSFORMS:
+ cause_str = "data transforms needed to be applied";
+ break;
+ case H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED:
+ cause_str = "optimized MPI types flag wasn't set";
+ break;
+ case H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES:
+ cause_str = "one of the dataspaces was neither simple nor scalar";
+ break;
+ case H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET:
+ cause_str = "dataset was not contiguous or chunked";
+ break;
+ case H5D_MPIO_PARALLEL_FILTERED_WRITES_DISABLED:
+ cause_str = "parallel writes to filtered datasets are disabled";
+ break;
+ case H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE:
+ cause_str = "an error occurred while checking if collective I/O was possible";
+ break;
+ case H5D_MPIO_COLLECTIVE:
+ case H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE:
+ default:
+ HDassert(0 && "invalid no collective cause reason");
+ break;
+ }
+
+ /*
+ * Determine if the local reasons for breaking collective I/O
+ * included the current cause
+ */
+ if (local_cause && (cur_cause & local_no_coll_cause)) {
+ buf_space_left = local_cause_len - local_cause_bytes_written;
+
+ /*
+ * Check if there were any previous error messages included. If
+ * so, prepend a semicolon to separate the messages.
+ */
+ if (buf_space_left && local_cause_bytes_written) {
+ HDstrncat(local_cause, "; ", buf_space_left);
+ local_cause_bytes_written += MIN(buf_space_left, 2);
+ buf_space_left -= MIN(buf_space_left, 2);
+ }
+
+ if (buf_space_left) {
+ HDstrncat(local_cause, cause_str, buf_space_left);
+ local_cause_bytes_written += MIN(buf_space_left, HDstrlen(cause_str));
+ }
+ }
+
+ /*
+ * Determine if the global reasons for breaking collective I/O
+ * included the current cause
+ */
+ if (global_cause && (cur_cause & global_no_coll_cause)) {
+ buf_space_left = global_cause_len - global_cause_bytes_written;
+
+ /*
+ * Check if there were any previous error messages included. If
+ * so, prepend a semicolon to separate the messages.
+ */
+ if (buf_space_left && global_cause_bytes_written) {
+ HDstrncat(global_cause, "; ", buf_space_left);
+ global_cause_bytes_written += MIN(buf_space_left, 2);
+ buf_space_left -= MIN(buf_space_left, 2);
+ }
+
+ if (buf_space_left) {
+ HDstrncat(global_cause, cause_str, buf_space_left);
+ global_cause_bytes_written += MIN(buf_space_left, HDstrlen(cause_str));
+ }
+ }
+ }
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_get_no_coll_cause_strings() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__mpio_select_read
*
* Purpose: MPI-IO function to read directly from app buffer to file.
@@ -500,145 +943,6 @@ done:
} /* end H5D__mpio_select_write() */
/*-------------------------------------------------------------------------
- * Function: H5D__mpio_array_gatherv
- *
- * Purpose: Given an array, specified in local_array, by each processor
- * calling this function, collects each array into a single
- * array which is then either gathered to the processor
- * specified by root, when allgather is false, or is
- * distributed back to all processors when allgather is true.
- *
- * The number of entries in the array contributed by an
- * individual processor and the size of each entry should be
- * specified in local_array_num_entries and array_entry_size,
- * respectively.
- *
- * The MPI communicator to use should be specified for comm.
- *
- * If the sort_func argument is supplied, the array is sorted
- * before the function returns.
- *
- * Note: if allgather is specified as true, root is ignored.
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Jordan Henderson
- * Sunday, April 9th, 2017
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5D__mpio_array_gatherv(void *local_array, size_t local_array_num_entries, size_t array_entry_size,
- void **_gathered_array, size_t *_gathered_array_num_entries, hbool_t allgather,
- int root, MPI_Comm comm, H5D_mpio_sort_func_cb_t sort_func)
-{
- size_t gathered_array_num_entries = 0; /* The size of the newly-constructed array */
- void * gathered_array = NULL; /* The newly-constructed array returned to the caller */
- int *receive_counts_array = NULL; /* Array containing number of entries each processor is contributing */
- int *displacements_array =
- NULL; /* Array of displacements where each processor places its data in the final array */
- int mpi_code, mpi_rank, mpi_size;
- int sendcount;
- herr_t ret_value = SUCCEED;
-
- FUNC_ENTER_STATIC
-
- HDassert(_gathered_array);
- HDassert(_gathered_array_num_entries);
-
- MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
-
- /* Determine the size of the end result array by collecting the number
- * of entries contributed by each processor into a single total.
- */
- if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&local_array_num_entries, &gathered_array_num_entries, 1,
- MPI_INT, MPI_SUM, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
-
- /* If 0 entries resulted from the collective operation, no processor is contributing anything and there is
- * nothing to do */
- if (gathered_array_num_entries > 0) {
- /*
- * If gathering to all processors, all processors need to allocate space for the resulting array, as
- * well as the receive counts and displacements arrays for the collective MPI_Allgatherv call.
- * Otherwise, only the root processor needs to allocate the space for an MPI_Gatherv call.
- */
- if (allgather || (mpi_rank == root)) {
- if (NULL == (gathered_array = H5MM_malloc(gathered_array_num_entries * array_entry_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate gathered array")
-
- if (NULL == (receive_counts_array = (int *)H5MM_malloc((size_t)mpi_size * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive counts array")
-
- if (NULL == (displacements_array = (int *)H5MM_malloc((size_t)mpi_size * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive displacements array")
- } /* end if */
-
- /*
- * If gathering to all processors, inform each processor of how many entries each other processor is
- * contributing to the resulting array by collecting the counts into each processor's "receive counts"
- * array. Otherwise, inform only the root processor of how many entries each other processor is
- * contributing.
- */
- if (allgather) {
- if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&local_array_num_entries, 1, MPI_INT,
- receive_counts_array, 1, MPI_INT, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
- } /* end if */
- else {
- if (MPI_SUCCESS != (mpi_code = MPI_Gather(&local_array_num_entries, 1, MPI_INT,
- receive_counts_array, 1, MPI_INT, root, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
- } /* end else */
-
- if (allgather || (mpi_rank == root)) {
- size_t i;
-
- /* Multiply each receive count by the size of the array entry, since the data is sent as bytes. */
- for (i = 0; i < (size_t)mpi_size; i++)
- H5_CHECKED_ASSIGN(receive_counts_array[i], int,
- (size_t)receive_counts_array[i] * array_entry_size, size_t);
-
- /* Set receive buffer offsets for the collective MPI_Allgatherv/MPI_Gatherv call. */
- displacements_array[0] = 0;
- for (i = 1; i < (size_t)mpi_size; i++)
- displacements_array[i] = displacements_array[i - 1] + receive_counts_array[i - 1];
- } /* end if */
-
- /* As the data is sent as bytes, calculate the true sendcount for the data. */
- H5_CHECKED_ASSIGN(sendcount, int, local_array_num_entries *array_entry_size, size_t);
-
- if (allgather) {
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Allgatherv(local_array, sendcount, MPI_BYTE, gathered_array,
- receive_counts_array, displacements_array, MPI_BYTE, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code)
- } /* end if */
- else {
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Gatherv(local_array, sendcount, MPI_BYTE, gathered_array,
- receive_counts_array, displacements_array, MPI_BYTE, root, comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Gatherv failed", mpi_code)
- } /* end else */
-
- if (sort_func && (allgather || (mpi_rank == root)))
- HDqsort(gathered_array, gathered_array_num_entries, array_entry_size, sort_func);
- } /* end if */
-
- *_gathered_array = gathered_array;
- *_gathered_array_num_entries = gathered_array_num_entries;
-
-done:
- if (receive_counts_array)
- H5MM_free(receive_counts_array);
- if (displacements_array)
- H5MM_free(displacements_array);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__mpio_array_gatherv() */
-
-/*-------------------------------------------------------------------------
* Function: H5D__mpio_get_sum_chunk
*
* Purpose: Routine for obtaining total number of chunks to cover
@@ -793,11 +1097,17 @@ static herr_t
H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm)
{
H5FD_mpio_chunk_opt_t chunk_opt_mode;
- int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT;
- int sum_chunk = -1;
+#ifdef H5Dmpio_DEBUG
+ hbool_t log_file_flag = FALSE;
+ FILE * debug_log_file = NULL;
+#endif
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
htri_t temp_not_link_io = FALSE;
#endif
+ int io_option = H5D_MULTI_CHUNK_IO_MORE_OPT;
+ int sum_chunk = -1;
+ int mpi_rank;
+ int mpi_size;
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -808,9 +1118,35 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
HDassert(type_info);
HDassert(fm);
- /* Disable collective metadata reads for chunked dataset I/O operations
- * in order to prevent potential hangs */
- H5CX_set_coll_metadata_read(FALSE);
+ /* Obtain the current rank of the process and the number of ranks */
+ if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+ if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI size")
+
+#ifdef H5Dmpio_DEBUG
+ /* Initialize file-level debugging if not initialized */
+ if (!H5D_mpio_debug_inited && H5D__mpio_debug_init() < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize H5Dmpio debugging")
+
+ /* Open file for debugging if necessary */
+ log_file_flag = H5D_mpio_debug_flags_s[(int)'f'];
+ if (log_file_flag) {
+ char debug_log_filename[1024];
+ time_t time_now;
+
+ HDsnprintf(debug_log_filename, 1024, "H5Dmpio_debug.rank%d", mpi_rank);
+
+ if (NULL == (debug_log_file = HDfopen(debug_log_filename, "a")))
+ HGOTO_ERROR(H5E_IO, H5E_OPENERROR, FAIL, "couldn't open debugging log file")
+
+ /* Print a short header for this I/O operation */
+ time_now = HDtime(NULL);
+ HDfprintf(debug_log_file, "##### %s", HDasctime(HDlocaltime(&time_now)));
+
+ debug_stream = debug_log_file;
+ }
+#endif
/* Check the optional property list for the collective chunk IO optimization option */
if (H5CX_get_mpio_chunk_opt_mode(&chunk_opt_mode) < 0)
@@ -824,13 +1160,10 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
/* via default path. branch by num threshold */
else {
unsigned one_link_chunk_io_threshold; /* Threshold to use single collective I/O for all chunks */
- int mpi_size; /* Number of processes in MPI job */
if (H5D__mpio_get_sum_chunk(io_info, fm, &sum_chunk) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL,
"unable to obtain the total chunk number of all processes");
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
/* Get the chunk optimization option threshold */
if (H5CX_get_mpio_chunk_opt_num(&one_link_chunk_io_threshold) < 0)
@@ -876,22 +1209,12 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
case H5D_ONE_LINK_CHUNK_IO_MORE_OPT:
/* Check if there are any filters in the pipeline */
if (io_info->dset->shared->dcpl_cache.pline.nused > 0) {
- /* For now, Multi-chunk IO must be forced for parallel filtered read,
- * so that data can be unfiltered as it is received. There is significant
- * complexity in unfiltering the data when it is read all at once into a
- * single buffer.
- */
- if (io_info->op_type == H5D_IO_OP_READ) {
- if (H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
- HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
- "couldn't finish optimized multiple filtered chunk MPI-IO")
- } /* end if */
- else if (H5D__link_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
+ if (H5D__link_chunk_filtered_collective_io(io_info, type_info, fm, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish filtered linked chunk MPI-IO")
} /* end if */
else
/* Perform unfiltered link chunk collective IO */
- if (H5D__link_chunk_collective_io(io_info, type_info, fm, sum_chunk) < 0)
+ if (H5D__link_chunk_collective_io(io_info, type_info, fm, sum_chunk, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish linked chunk MPI-IO")
break;
@@ -899,18 +1222,28 @@ H5D__chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
default: /* multiple chunk IO via threshold */
/* Check if there are any filters in the pipeline */
if (io_info->dset->shared->dcpl_cache.pline.nused > 0) {
- if (H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm) < 0)
+ if (H5D__multi_chunk_filtered_collective_io(io_info, type_info, fm, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL,
"couldn't finish optimized multiple filtered chunk MPI-IO")
} /* end if */
else
/* Perform unfiltered multi chunk collective IO */
- if (H5D__multi_chunk_collective_io(io_info, type_info, fm) < 0)
+ if (H5D__multi_chunk_collective_io(io_info, type_info, fm, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish optimized multiple chunk MPI-IO")
break;
} /* end switch */
done:
+#ifdef H5Dmpio_DEBUG
+ /* Close debugging log file */
+ if (debug_log_file) {
+ HDfprintf(debug_log_file, "##############\n\n");
+ if (EOF == HDfclose(debug_log_file))
+ HDONE_ERROR(H5E_IO, H5E_CLOSEERROR, FAIL, "couldn't close debugging log file")
+ debug_stream = H5DEBUG(D);
+ }
+#endif
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_collective_io */
@@ -993,7 +1326,7 @@ done:
*/
static herr_t
H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
- int sum_chunk)
+ int sum_chunk, int mpi_rank, int mpi_size)
{
H5D_chunk_addr_info_t *chunk_addr_info_array = NULL;
MPI_Datatype chunk_final_mtype; /* Final memory MPI datatype for all chunks with selection */
@@ -1074,9 +1407,8 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
/* Set up the base storage address for this chunk */
io_info->store = &ctg_store;
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before inter_collective_io for total chunk = 1 \n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "before inter_collective_io for total chunk = 1");
#endif
/* Perform I/O */
@@ -1092,9 +1424,8 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
num_chunk = H5SL_count(fm->sel_chunks);
H5_CHECK_OVERFLOW(num_chunk, size_t, int);
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "total_chunks = %zu, num_chunk = %zu\n", total_chunks, num_chunk);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "total_chunks = %zu, num_chunk = %zu", total_chunks, num_chunk);
#endif
/* Set up MPI datatype for chunks selected */
@@ -1125,18 +1456,17 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
"couldn't allocate chunk file is derived datatype flags buffer")
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before sorting the chunk address \n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "before sorting chunk addresses");
#endif
+
/* Sort the chunk address */
- if (H5D__sort_chunk(io_info, fm, chunk_addr_info_array, sum_chunk) < 0)
+ if (H5D__sort_chunk(io_info, fm, chunk_addr_info_array, sum_chunk, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSWAP, FAIL, "unable to sort chunk address")
ctg_store.contig.dset_addr = chunk_addr_info_array[0].chunk_addr;
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "after sorting the chunk address \n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "after sorting chunk addresses");
#endif
/* Obtain MPI derived datatype from all individual chunks */
@@ -1241,9 +1571,9 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
/* No chunks selected for this process */
mpi_buf_count = (hsize_t)0;
} /* end else */
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before coming to final collective IO\n");
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "before coming to final collective I/O");
#endif
/* Set up the base storage address for this chunk */
@@ -1256,11 +1586,11 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
} /* end else */
done:
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before freeing memory inside H5D_link_collective_io ret_value = %d\n",
- ret_value);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "before freeing memory inside H5D_link_collective_io ret_value = %d",
+ ret_value);
#endif
+
/* Release resources */
if (chunk_addr_info_array)
H5MM_xfree(chunk_addr_info_array);
@@ -1293,68 +1623,89 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D__link_chunk_filtered_collective_io
*
- * Purpose: Routine for one collective IO with one MPI derived datatype
- * to link with all filtered chunks
- *
- * 1. Construct a list of selected chunks in the collective IO
- * operation
- * A. If any chunk is being written to by more than 1
- * process, the process writing to the chunk which
- * currently has the least amount of chunks assigned
- * to it becomes the new owner (in the case of ties,
- * the lowest MPI rank becomes the new owner)
- * 2. If the operation is a write operation
- * A. Loop through each chunk in the operation
- * I. If this is not a full overwrite of the chunk
- * a) Read the chunk from file and pass the chunk
- * through the filter pipeline in reverse order
- * (Unfilter the chunk)
+ * Purpose: Performs collective I/O on filtered chunks by creating a
+ * single MPI derived datatype to link with all filtered
+ * chunks. The general algorithm is as follows:
+ *
+ * 1. Construct a list of selected chunks in the collective
+ * I/O operation
+ * 2. If the operation is a read operation
+ * A. Ensure that the list of chunks is sorted in
+ * monotonically non-decreasing order of chunk offset
+ * in the file
+ * B. Participate in a collective read of chunks from
+ * the file
+ * C. Loop through each selected chunk, unfiltering it and
+ * scattering the data to the application's read buffer
+ * 3. If the operation is a write operation
+ * A. Redistribute any chunks being written by more than 1
+ * MPI rank, such that the chunk is only owned by 1 MPI
+ * rank. The rank writing to the chunk which currently
+ * has the least amount of chunks assigned to it becomes
+ * the new owner (in the case of ties, the lowest MPI
+ * rank becomes the new owner)
+ * B. Participate in a collective read of chunks from the
+ * file
+ * C. Loop through each chunk selected in the operation
+ * and for each chunk:
+ * I. If we actually read the chunk from the file (if
+ * a chunk is being fully overwritten, we skip
+ * reading it), pass the chunk through the filter
+ * pipeline in reverse order (unfilter the chunk)
* II. Update the chunk data with the modifications from
- * the owning process
+ * the owning MPI rank
* III. Receive any modification data from other
- * processes and update the chunk data with these
+ * ranks and update the chunk data with those
* modifications
* IV. Filter the chunk
- * B. Contribute the modified chunks to an array gathered
- * by all processes which contains the new sizes of
- * every chunk modified in the collective IO operation
- * C. All processes collectively re-allocate each chunk
- * from the gathered array with their new sizes after
- * the filter operation
- * D. If this process has any chunks selected in the IO
- * operation, create an MPI derived type for memory and
- * file to write out the process' selected chunks to the
- * file
- * E. Perform the collective write
- * F. All processes collectively re-insert each modified
+ * D. Contribute the modified chunks to an array gathered
+ * by all ranks which contains information for
+ * re-allocating space in the file for every chunk
+ * modified. Then, each rank collectively re-allocates
+ * each chunk from the gathered array with their new
+ * sizes after the filter operation
+ * E. Proceed with the collective write operation for all
+ * the modified chunks
+ * F. Contribute the modified chunks to an array gathered
+ * by all ranks which contains information for
+ * re-inserting every chunk modified into the chunk
+ * index. Then, each rank collectively re-inserts each
* chunk from the gathered array into the chunk index
*
+ * TODO: Note that steps D. and F. here are both collective
+ * operations that partially share data from the
+ * H5D_filtered_collective_io_info_t structure. To
+ * try to conserve on memory a bit, the distributed
+ * arrays these operations create are discarded after
+ * each operation is performed. If memory consumption
+ * here proves to not be an issue, the necessary data
+ * for both operations could be combined into a single
+ * structure so that only one collective MPI operation
+ * is needed to carry out both operations, rather than
+ * two.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Friday, Nov. 4th, 2016
- *
*-------------------------------------------------------------------------
*/
static herr_t
H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm)
+ H5D_chunk_map_t *fm, int mpi_rank, int mpi_size)
{
- H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
- H5D_filtered_collective_io_info_t *collective_chunk_list =
- NULL; /* The list of chunks used during collective operations */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- MPI_Datatype mem_type = MPI_BYTE;
- MPI_Datatype file_type = MPI_BYTE;
- hbool_t mem_type_is_derived = FALSE;
- hbool_t file_type_is_derived = FALSE;
- size_t chunk_list_num_entries;
- size_t collective_chunk_list_num_entries;
- size_t * num_chunks_selected_array = NULL; /* Array of number of chunks selected on each process */
- size_t i; /* Local index variable */
- int mpi_rank, mpi_size, mpi_code;
- herr_t ret_value = SUCCEED;
+ H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
+ H5D_filtered_collective_io_info_t *chunk_hash_table = NULL;
+ unsigned char ** chunk_msg_bufs = NULL;
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ MPI_Datatype mem_type = MPI_BYTE;
+ MPI_Datatype file_type = MPI_BYTE;
+ hbool_t mem_type_is_derived = FALSE;
+ hbool_t file_type_is_derived = FALSE;
+ size_t * rank_chunks_assigned_map = NULL;
+ size_t chunk_list_num_entries;
+ size_t i;
+ int chunk_msg_bufs_len = 0;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1362,11 +1713,12 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
HDassert(type_info);
HDassert(fm);
- /* Obtain the current rank of the process and the number of processes */
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_DEBUG_VA(mpi_rank, "Performing Linked-chunk I/O (%s) with MPI Comm size of %d",
+ io_info->op_type == H5D_IO_OP_WRITE ? "write" : "read", mpi_size);
+ H5D_MPIO_TIME_START(mpi_rank, "Linked-chunk I/O");
+#endif
/* Set the actual-chunk-opt-mode property. */
H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_LINK_CHUNK);
@@ -1377,123 +1729,127 @@ H5D__link_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_in
H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Build a list of selected chunks in the collective io operation */
- if (H5D__construct_filtered_io_info_list(io_info, type_info, fm, &chunk_list, &chunk_list_num_entries) <
- 0)
+ if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, type_info, fm, &chunk_list,
+ &chunk_list_num_entries, mpi_rank) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list")
- if (io_info->op_type == H5D_IO_OP_WRITE) { /* Filtered collective write */
+ if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */
+ if (H5D__mpio_collective_filtered_chunk_read(chunk_list, chunk_list_num_entries, io_info, type_info,
+ mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks")
+ }
+ else { /* Filtered collective write */
H5D_chk_idx_info_t index_info;
- H5D_chunk_ud_t udata;
hsize_t mpi_buf_count;
- /* Construct chunked index info */
- index_info.f = io_info->dset->oloc.file;
- index_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
- index_info.layout = &(io_info->dset->shared->layout.u.chunk);
- index_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
-
- /* Set up chunk information for insertion to chunk index */
- udata.common.layout = index_info.layout;
- udata.common.storage = index_info.storage;
- udata.filter_mask = 0;
-
- /* Iterate through all the chunks in the collective write operation,
- * updating each chunk with the data modifications from other processes,
- * then re-filtering the chunk.
+ H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, io_info);
+
+ if (mpi_size > 1) {
+ /* Redistribute shared chunks being written to */
+ if (H5D__mpio_redistribute_shared_chunks(chunk_list, chunk_list_num_entries, io_info, fm,
+ mpi_rank, mpi_size, &rank_chunks_assigned_map) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks")
+
+ /* Send any chunk modification messages for chunks this rank no longer owns */
+ if (H5D__mpio_share_chunk_modification_data(chunk_list, &chunk_list_num_entries, io_info,
+ type_info, mpi_rank, mpi_size, &chunk_hash_table,
+ &chunk_msg_bufs, &chunk_msg_bufs_len) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "unable to send chunk modification data between MPI ranks")
+
+ /* Make sure the local chunk list was updated correctly */
+ HDassert(chunk_list_num_entries == rank_chunks_assigned_map[mpi_rank]);
+ }
+
+ /* Proceed to update all the chunks this rank owns with its own
+ * modification data and data from other ranks, before re-filtering
+ * the chunks. As chunk reads are done collectively here, all ranks
+ * must participate.
*/
- for (i = 0; i < chunk_list_num_entries; i++)
- if (mpi_rank == chunk_list[i].owners.new_owner)
- if (H5D__filtered_collective_chunk_entry_io(&chunk_list[i], io_info, type_info, fm) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't process chunk entry")
-
- /* Gather the new chunk sizes to all processes for a collective reallocation
- * of the chunks in the file.
- */
- if (H5D__mpio_array_gatherv(chunk_list, chunk_list_num_entries,
- sizeof(H5D_filtered_collective_io_info_t),
- (void **)&collective_chunk_list, &collective_chunk_list_num_entries, true,
- 0, io_info->comm, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes")
-
- /* Collectively re-allocate the modified chunks (from each process) in the file */
- for (i = 0; i < collective_chunk_list_num_entries; i++) {
- hbool_t insert;
-
- if (H5D__chunk_file_alloc(&index_info, &collective_chunk_list[i].chunk_states.chunk_current,
- &collective_chunk_list[i].chunk_states.new_chunk, &insert,
- collective_chunk_list[i].scaled) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
- } /* end for */
-
- if (NULL == (num_chunks_selected_array = (size_t *)H5MM_malloc((size_t)mpi_size * sizeof(size_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate num chunks selected array")
-
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Allgather(&chunk_list_num_entries, 1, MPI_UNSIGNED_LONG_LONG,
- num_chunks_selected_array, 1, MPI_UNSIGNED_LONG_LONG, io_info->comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
-
- /* If this process has any chunks selected, create a MPI type for collectively
- * writing out the chunks to file. Otherwise, the process contributes to the
+ if (H5D__mpio_collective_filtered_chunk_update(chunk_list, chunk_list_num_entries, chunk_hash_table,
+ chunk_msg_bufs, chunk_msg_bufs_len, io_info, type_info,
+ mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks")
+
+ /* Free up resources used by chunk hash table now that we're done updating chunks */
+ HASH_CLEAR(hh, chunk_hash_table);
+
+ /* All ranks now collectively re-allocate file space for all chunks */
+ if (H5D__mpio_collective_filtered_chunk_reallocate(chunk_list, chunk_list_num_entries,
+ rank_chunks_assigned_map, io_info, &index_info,
+ mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "couldn't collectively re-allocate file space for chunks")
+
+        /* If this rank has any chunks selected, create an MPI type for collectively
+ * writing out the chunks to file. Otherwise, the rank contributes to the
* collective write with a none type.
*/
- if (chunk_list_num_entries) {
- size_t offset;
-
- /* During the collective re-allocation of chunks in the file, the record for each
- * chunk is only updated in the collective array, not in the local copy of chunks on each
- * process. However, each process needs the updated chunk records so that they can create
- * a MPI type for the collective write that will write to the chunk's possible new locations
- * in the file instead of the old ones. This ugly hack seems to be the best solution to
- * copy the information back to the local array and avoid having to modify the collective
- * write type function in an ugly way so that it will accept the collective array instead
- * of the local array. This works correctly because the array gather function guarantees
- * that the chunk data in the collective array is ordered in blocks by rank.
- */
- for (i = 0, offset = 0; i < (size_t)mpi_rank; i++)
- offset += num_chunks_selected_array[i];
-
- H5MM_memcpy(chunk_list, &collective_chunk_list[offset],
- num_chunks_selected_array[mpi_rank] * sizeof(H5D_filtered_collective_io_info_t));
+ if (H5D__mpio_collective_filtered_io_type(chunk_list, chunk_list_num_entries, io_info->op_type,
+ &mem_type, &mem_type_is_derived, &file_type,
+ &file_type_is_derived) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "couldn't create MPI type for writing filtered chunks")
- /* Create single MPI type encompassing each selection in the dataspace */
- if (H5D__mpio_filtered_collective_write_type(chunk_list, chunk_list_num_entries, &mem_type,
- &mem_type_is_derived, &file_type,
- &file_type_is_derived) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "couldn't create MPI link chunk I/O type")
+ mpi_buf_count = (file_type_is_derived || mem_type_is_derived) ? 1 : 0;
- /* Override the write buffer to point to the address of the first
- * chunk data buffer
+        /* Set up contiguous storage info for the I/O operation */
+ if (chunk_list_num_entries) {
+ /*
+ * Override the write buffer to point to the first
+ * chunk's data buffer
*/
io_info->u.wbuf = chunk_list[0].buf;
- } /* end if */
- /* We have a single, complicated MPI datatype for both memory & file */
- mpi_buf_count = (mem_type_is_derived && file_type_is_derived) ? (hsize_t)1 : (hsize_t)0;
-
- /* Set up the base storage address for this operation */
- ctg_store.contig.dset_addr = 0; /* Write address must be set to address 0 */
- io_info->store = &ctg_store;
+ /*
+             * Set up the base storage address for this operation
+ * to be the first chunk's file address
+ */
+ ctg_store.contig.dset_addr = chunk_list[0].chunk_new.offset;
+ }
+ else
+ ctg_store.contig.dset_addr = 0;
/* Perform I/O */
+ io_info->store = &ctg_store;
if (H5D__final_collective_io(io_info, type_info, mpi_buf_count, file_type, mem_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
+ /* Free up resources in anticipation of following collective operation */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ if (chunk_list[i].buf) {
+ H5MM_free(chunk_list[i].buf);
+ chunk_list[i].buf = NULL;
+ }
+ }
+
/* Participate in the collective re-insertion of all chunks modified
- * in this iteration into the chunk index
+ * into the chunk index
*/
- for (i = 0; i < collective_chunk_list_num_entries; i++) {
- udata.chunk_block = collective_chunk_list[i].chunk_states.new_chunk;
- udata.common.scaled = collective_chunk_list[i].scaled;
- udata.chunk_idx = collective_chunk_list[i].index;
-
- if ((index_info.storage->ops->insert)(&index_info, &udata, io_info->dset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index")
- } /* end for */
- } /* end if */
+ if (H5D__mpio_collective_filtered_chunk_reinsert(chunk_list, chunk_list_num_entries,
+ rank_chunks_assigned_map, io_info, &index_info,
+ mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "couldn't collectively re-insert modified chunks into chunk index")
+ }
done:
- /* Free resources used by a process which had some selection */
+ /* Free the MPI buf and file types, if they were derived */
+ if (mem_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if (file_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+
+ if (chunk_msg_bufs) {
+ for (i = 0; i < (size_t)chunk_msg_bufs_len; i++)
+ H5MM_free(chunk_msg_bufs[i]);
+
+ H5MM_free(chunk_msg_bufs);
+ }
+
+ HASH_CLEAR(hh, chunk_hash_table);
+
+ /* Free resources used by a rank which had some selection */
if (chunk_list) {
for (i = 0; i < chunk_list_num_entries; i++)
if (chunk_list[i].buf)
@@ -1502,16 +1858,13 @@ done:
H5MM_free(chunk_list);
} /* end if */
- if (num_chunks_selected_array)
- H5MM_free(num_chunks_selected_array);
- if (collective_chunk_list)
- H5MM_free(collective_chunk_list);
+ if (rank_chunks_assigned_map)
+ H5MM_free(rank_chunks_assigned_map);
- /* Free the MPI buf and file types, if they were derived */
- if (mem_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- if (file_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__link_chunk_filtered_collective_io() */
@@ -1534,7 +1887,8 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm)
+H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm,
+ int mpi_rank, int mpi_size)
{
H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
@@ -1547,11 +1901,8 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
H5FD_mpio_collective_opt_t last_coll_opt_mode =
H5FD_MPIO_COLLECTIVE_IO; /* Last parallel transfer with independent IO or collective IO with this mode
*/
- size_t total_chunk; /* Total # of chunks in dataset */
-#ifdef H5Dmpio_DEBUG
- int mpi_rank;
-#endif
- size_t u; /* Local index variable */
+ size_t total_chunk; /* Total # of chunks in dataset */
+ size_t u; /* Local index variable */
H5D_mpio_actual_io_mode_t actual_io_mode =
H5D_MPIO_NO_COLLECTIVE; /* Local variable for tracking the I/O mode used. */
herr_t ret_value = SUCCEED;
@@ -1561,10 +1912,6 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
/* Set the actual chunk opt mode property */
H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_MULTI_CHUNK);
-#ifdef H5Dmpio_DEBUG
- mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file);
-#endif
-
/* Retrieve total # of chunks in dataset */
H5_CHECKED_ASSIGN(total_chunk, size_t, fm->layout->u.chunk.nchunks, hsize_t);
HDassert(total_chunk != 0);
@@ -1572,13 +1919,13 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
/* Allocate memories */
chunk_io_option = (uint8_t *)H5MM_calloc(total_chunk);
chunk_addr = (haddr_t *)H5MM_calloc(total_chunk * sizeof(haddr_t));
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "total_chunk %zu\n", total_chunk);
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "total_chunk %zu", total_chunk);
#endif
/* Obtain IO option for each chunk */
- if (H5D__obtain_mpio_mode(io_info, fm, chunk_io_option, chunk_addr) < 0)
+ if (H5D__obtain_mpio_mode(io_info, fm, chunk_io_option, chunk_addr, mpi_rank, mpi_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTRECV, FAIL, "unable to obtain MPIO mode")
/* Set up contiguous I/O info object */
@@ -1606,9 +1953,8 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
H5S_t * fspace; /* Dataspace describing chunk & selection in it */
H5S_t * mspace; /* Dataspace describing selection in memory corresponding to this chunk */
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "mpi_rank = %d, chunk index = %zu\n", mpi_rank, u);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "mpi_rank = %d, chunk index = %zu", mpi_rank, u);
#endif
/* Get the chunk info for this chunk, if there are elements selected */
chunk_info = fm->select_chunk[u];
@@ -1626,10 +1972,9 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
* needs to contribute MPI NONE TYPE.
*/
if (chunk_io_option[u] == H5D_CHUNK_IO_MODE_COL) {
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "inside collective chunk IO mpi_rank = %d, chunk index = %zu\n",
- mpi_rank, u);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "inside collective chunk IO mpi_rank = %d, chunk index = %zu",
+ mpi_rank, u);
#endif
/* Set the file & memory dataspaces */
@@ -1665,10 +2010,9 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
} /* end if */
else { /* possible independent IO for this chunk */
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "inside independent IO mpi_rank = %d, chunk index = %zu\n", mpi_rank,
- u);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "inside independent IO mpi_rank = %d, chunk index = %zu", mpi_rank,
+ u);
#endif
HDassert(chunk_io_option[u] == 0);
@@ -1698,9 +2042,8 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
/* Perform the I/O */
if (H5D__inter_collective_io(&ctg_io_info, type_info, fspace, mspace) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish shared collective MPI-IO")
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "after inter collective IO\n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "after inter collective IO");
#endif
} /* end else */
} /* end for */
@@ -1720,80 +2063,101 @@ done:
/*-------------------------------------------------------------------------
* Function: H5D__multi_chunk_filtered_collective_io
*
- * Purpose: To do filtered collective IO iteratively to save on memory.
- * While link_chunk_filtered_collective_io will construct and
- * work on a list of all of the chunks selected in the IO
- * operation at once, this function works iteratively on a set
- * of chunks at a time; at most one chunk per rank per
- * iteration.
- *
- * 1. Construct a list of selected chunks in the collective IO
- * operation
- * A. If any chunk is being written to by more than 1
- * process, the process writing to the chunk which
- * currently has the least amount of chunks assigned
- * to it becomes the new owner (in the case of ties,
- * the lowest MPI rank becomes the new owner)
- * 2. If the operation is a read operation
- * A. Loop through each chunk in the operation
- * I. Read the chunk from the file
- * II. Unfilter the chunk
- * III. Scatter the read chunk data to the user's buffer
- * 3. If the operation is a write operation
- * A. Loop through each chunk in the operation
- * I. If this is not a full overwrite of the chunk
- * a) Read the chunk from file and pass the chunk
- * through the filter pipeline in reverse order
- * (Unfilter the chunk)
- * II. Update the chunk data with the modifications from
- * the owning process
- * III. Receive any modification data from other
- * processes and update the chunk data with these
- * modifications
- * IV. Filter the chunk
- * V. Contribute the chunk to an array gathered by
- * all processes which contains every chunk
- * modified in this iteration (up to one chunk
- * per process, some processes may not have a
- * selection/may have less chunks to work on than
- * other processes)
- * VI. All processes collectively re-allocate each
- * chunk from the gathered array with their new
- * sizes after the filter operation
- * VII. Proceed with the collective write operation
- * for the chunks modified on this iteration
- * VIII. All processes collectively re-insert each
- * chunk from the gathered array into the chunk
- * index
+ * Purpose: Performs collective I/O on filtered chunks iteratively to
+ * save on memory and potentially get better performance
+ * depending on the average number of chunks per rank. While
+ * linked-chunk I/O will construct and work on a list of all
+ * of the chunks selected in the I/O operation at once, this
+ * function works iteratively on a set of chunks at a time; at
+ * most one chunk per rank per iteration. The general
+ * algorithm is as follows:
+ *
+ * 1. Construct a list of selected chunks in the collective
+ * I/O operation
+ * 2. If the operation is a read operation, loop an amount of
+ * times equal to the maximum number of chunks selected on
+ * any particular rank and on each iteration:
+ * A. Participate in a collective read of chunks from
+ * the file (ranks that run out of chunks still need
+ * to participate)
+ * B. Unfilter the chunk that was read (if any)
+ * C. Scatter the read chunk's data to the application's
+ * read buffer
+ * 3. If the operation is a write operation, redistribute any
+ * chunks being written to by more than 1 MPI rank, such
+ * that the chunk is only owned by 1 MPI rank. The rank
+ * writing to the chunk which currently has the least
+ * amount of chunks assigned to it becomes the new owner
+ * (in the case of ties, the lowest MPI rank becomes the
+ * new owner). Then, loop an amount of times equal to the
+ * maximum number of chunks selected on any particular
+ * rank and on each iteration:
+ * A. Participate in a collective read of chunks from
+ * the file (ranks that run out of chunks still need
+ * to participate)
+ * I. If we actually read a chunk from the file (if
+ * a chunk is being fully overwritten, we skip
+ * reading it), pass the chunk through the filter
+ * pipeline in reverse order (unfilter the chunk)
+ * B. Update the chunk data with the modifications from
+ * the owning rank
+ * C. Receive any modification data from other ranks and
+ * update the chunk data with those modifications
+ * D. Filter the chunk
+ * E. Contribute the chunk to an array gathered by
+ * all ranks which contains information for
+ * re-allocating space in the file for every chunk
+ * modified in this iteration (up to one chunk per
+ * rank; some ranks may not have a selection/may have
+ *                              fewer chunks to work on than other ranks). Then,
+ * each rank collectively re-allocates each chunk
+ * from the gathered array with their new sizes
+ * after the filter operation
+ * F. Proceed with the collective write operation
+ * for the chunks modified on this iteration
+ * G. Contribute the chunk to an array gathered by
+ * all ranks which contains information for
+ * re-inserting every chunk modified on this
+ * iteration into the chunk index. Then, each rank
+ * collectively re-inserts each chunk from the
+ * gathered array into the chunk index
+ *
+ * TODO: Note that steps E. and G. here are both collective
+ * operations that partially share data from the
+ * H5D_filtered_collective_io_info_t structure. To
+ * try to conserve on memory a bit, the distributed
+ * arrays these operations create are discarded after
+ * each operation is performed. If memory consumption
+ * here proves to not be an issue, the necessary data
+ * for both operations could be combined into a single
+ * structure so that only one collective MPI operation
+ * is needed to carry out both operations, rather than
+ * two.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Friday, Dec. 2nd, 2016
- *
*-------------------------------------------------------------------------
*/
static herr_t
H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- H5D_chunk_map_t *fm)
+ H5D_chunk_map_t *fm, int mpi_rank, int mpi_size)
{
- H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
- H5D_filtered_collective_io_info_t *collective_chunk_list =
- NULL; /* The list of chunks used during collective operations */
- H5D_storage_t store; /* union of EFL and chunk pointer in file space */
- H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
- H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
- MPI_Datatype *file_type_array = NULL;
- MPI_Datatype *mem_type_array = NULL;
- hbool_t * file_type_is_derived_array = NULL;
- hbool_t * mem_type_is_derived_array = NULL;
- hbool_t * has_chunk_selected_array =
- NULL; /* Array of whether or not each process is contributing a chunk to each iteration */
- size_t chunk_list_num_entries;
- size_t collective_chunk_list_num_entries;
- size_t i, j; /* Local index variable */
- int mpi_rank, mpi_size, mpi_code;
- herr_t ret_value = SUCCEED;
+ H5D_filtered_collective_io_info_t *chunk_list = NULL; /* The list of chunks being read/written */
+ H5D_filtered_collective_io_info_t *chunk_hash_table = NULL;
+ unsigned char ** chunk_msg_bufs = NULL;
+ H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */
+ H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */
+ MPI_Datatype mem_type = MPI_BYTE;
+ MPI_Datatype file_type = MPI_BYTE;
+ hbool_t mem_type_is_derived = FALSE;
+ hbool_t file_type_is_derived = FALSE;
+ hbool_t have_chunk_to_process;
+ size_t chunk_list_num_entries;
+ size_t i;
+ size_t max_num_chunks;
+ int chunk_msg_bufs_len = 0;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -1801,11 +2165,12 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
HDassert(type_info);
HDassert(fm);
- /* Obtain the current rank of the process and the number of processes */
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_DEBUG_VA(mpi_rank, "Performing Multi-chunk I/O (%s) with MPI Comm size of %d",
+ io_info->op_type == H5D_IO_OP_WRITE ? "write" : "read", mpi_size);
+ H5D_MPIO_TIME_START(mpi_rank, "Multi-chunk I/O");
+#endif
/* Set the actual chunk opt mode property */
H5CX_set_mpio_actual_chunk_opt(H5D_MPIO_MULTI_CHUNK);
@@ -1816,10 +2181,19 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
H5CX_set_mpio_actual_io_mode(H5D_MPIO_CHUNK_COLLECTIVE);
/* Build a list of selected chunks in the collective IO operation */
- if (H5D__construct_filtered_io_info_list(io_info, type_info, fm, &chunk_list, &chunk_list_num_entries) <
- 0)
+ if (H5D__mpio_collective_filtered_chunk_io_setup(io_info, type_info, fm, &chunk_list,
+ &chunk_list_num_entries, mpi_rank) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't construct filtered I/O info list")
+ /* Retrieve the maximum number of chunks selected for any rank */
+ if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&chunk_list_num_entries, &max_num_chunks, 1,
+ MPI_UNSIGNED_LONG_LONG, MPI_MAX, io_info->comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
+
+ /* If no one has anything selected at all, end the operation */
+ if (0 == max_num_chunks)
+ HGOTO_DONE(SUCCEED);
+
/* Set up contiguous I/O info object */
H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info));
ctg_io_info.store = &ctg_store;
@@ -1827,190 +2201,147 @@ H5D__multi_chunk_filtered_collective_io(H5D_io_info_t *io_info, const H5D_type_i
/* Initialize temporary contiguous storage info */
ctg_store.contig.dset_size = (hsize_t)io_info->dset->shared->layout.u.chunk.size;
- ctg_store.contig.dset_addr = 0;
-
- /* Set dataset storage for I/O info */
- io_info->store = &store;
if (io_info->op_type == H5D_IO_OP_READ) { /* Filtered collective read */
- for (i = 0; i < chunk_list_num_entries; i++)
- if (H5D__filtered_collective_chunk_entry_io(&chunk_list[i], io_info, type_info, fm) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't process chunk entry")
- } /* end if */
+ for (i = 0; i < max_num_chunks; i++) {
+ /* Check if this rank has a chunk to work on for this iteration */
+ have_chunk_to_process = (i < chunk_list_num_entries);
+
+ if (H5D__mpio_collective_filtered_chunk_read(have_chunk_to_process ? &chunk_list[i] : NULL,
+ have_chunk_to_process ? 1 : 0, io_info, type_info,
+ mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't read filtered chunks")
+
+ if (have_chunk_to_process && chunk_list[i].buf) {
+ H5MM_free(chunk_list[i].buf);
+ chunk_list[i].buf = NULL;
+ }
+ }
+ }
else { /* Filtered collective write */
H5D_chk_idx_info_t index_info;
- H5D_chunk_ud_t udata;
- size_t max_num_chunks;
hsize_t mpi_buf_count;
/* Construct chunked index info */
- index_info.f = io_info->dset->oloc.file;
- index_info.pline = &(io_info->dset->shared->dcpl_cache.pline);
- index_info.layout = &(io_info->dset->shared->layout.u.chunk);
- index_info.storage = &(io_info->dset->shared->layout.storage.u.chunk);
-
- /* Set up chunk information for insertion to chunk index */
- udata.common.layout = index_info.layout;
- udata.common.storage = index_info.storage;
- udata.filter_mask = 0;
-
- /* Retrieve the maximum number of chunks being written among all processes */
- if (MPI_SUCCESS != (mpi_code = MPI_Allreduce(&chunk_list_num_entries, &max_num_chunks, 1,
- MPI_UNSIGNED_LONG_LONG, MPI_MAX, io_info->comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allreduce failed", mpi_code)
-
- /* If no one is writing anything at all, end the operation */
- if (!(max_num_chunks > 0))
- HGOTO_DONE(SUCCEED);
-
- /* Allocate arrays for storing MPI file and mem types and whether or not the
- * types were derived.
- */
- if (NULL == (file_type_array = (MPI_Datatype *)H5MM_malloc(max_num_chunks * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file type array")
-
- if (NULL == (file_type_is_derived_array = (hbool_t *)H5MM_calloc(max_num_chunks * sizeof(hbool_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file type is derived array")
-
- if (NULL == (mem_type_array = (MPI_Datatype *)H5MM_malloc(max_num_chunks * sizeof(MPI_Datatype))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate mem type array")
-
- if (NULL == (mem_type_is_derived_array = (hbool_t *)H5MM_calloc(max_num_chunks * sizeof(hbool_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate mem type is derived array")
-
- /* Iterate over the max number of chunks among all processes, as this process could
- * have no chunks left to work on, but it still needs to participate in the collective
- * re-allocation and re-insertion of chunks modified by other processes.
+ H5D_MPIO_INIT_CHUNK_IDX_INFO(index_info, io_info);
+
+ if (mpi_size > 1) {
+ /* Redistribute shared chunks being written to */
+ if (H5D__mpio_redistribute_shared_chunks(chunk_list, chunk_list_num_entries, io_info, fm,
+ mpi_rank, mpi_size, NULL) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks")
+
+ /* Send any chunk modification messages for chunks this rank no longer owns */
+ if (H5D__mpio_share_chunk_modification_data(chunk_list, &chunk_list_num_entries, io_info,
+ type_info, mpi_rank, mpi_size, &chunk_hash_table,
+ &chunk_msg_bufs, &chunk_msg_bufs_len) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "unable to send chunk modification data between MPI ranks")
+ }
+
+ /* Iterate over the max number of chunks among all ranks, as this rank could
+ * have no chunks left to work on, but it still needs to participate in the
+ * collective re-allocation and re-insertion of chunks modified by other ranks.
*/
for (i = 0; i < max_num_chunks; i++) {
- /* Check if this process has a chunk to work on for this iteration */
- hbool_t have_chunk_to_process =
- (i < chunk_list_num_entries) && (mpi_rank == chunk_list[i].owners.new_owner);
-
- if (have_chunk_to_process)
- if (H5D__filtered_collective_chunk_entry_io(&chunk_list[i], io_info, type_info, fm) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't process chunk entry")
+ /* Check if this rank has a chunk to work on for this iteration */
+ have_chunk_to_process = (i < chunk_list_num_entries) && (mpi_rank == chunk_list[i].new_owner);
- /* Gather the new chunk sizes to all processes for a collective re-allocation
- * of the chunks in the file
+ /* Proceed to update the chunk this rank owns (if any left) with its
+ * own modification data and data from other ranks, before re-filtering
+ * the chunks. As chunk reads are done collectively here, all ranks
+ * must participate.
*/
- if (H5D__mpio_array_gatherv(&chunk_list[i], have_chunk_to_process ? 1 : 0,
- sizeof(H5D_filtered_collective_io_info_t),
- (void **)&collective_chunk_list, &collective_chunk_list_num_entries,
- true, 0, io_info->comm, NULL) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather new chunk sizes")
-
- /* Participate in the collective re-allocation of all chunks modified
- * in this iteration.
+ if (H5D__mpio_collective_filtered_chunk_update(have_chunk_to_process ? &chunk_list[i] : NULL,
+ have_chunk_to_process ? 1 : 0, chunk_hash_table,
+ chunk_msg_bufs, chunk_msg_bufs_len, io_info,
+ type_info, mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't update modified chunks")
+
+ /* All ranks now collectively re-allocate file space for all chunks */
+ if (H5D__mpio_collective_filtered_chunk_reallocate(have_chunk_to_process ? &chunk_list[i] : NULL,
+ have_chunk_to_process ? 1 : 0, NULL, io_info,
+ &index_info, mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "couldn't collectively re-allocate file space for chunks")
+
+ /*
+ * If this rank has a chunk to work on, create a MPI type
+ * for writing out the chunk. Otherwise, the rank will
+ * use MPI_BYTE for the file and memory type and specify
+ * a count of 0.
*/
- for (j = 0; j < collective_chunk_list_num_entries; j++) {
- hbool_t insert = FALSE;
+ if (H5D__mpio_collective_filtered_io_type(
+ have_chunk_to_process ? &chunk_list[i] : NULL, have_chunk_to_process ? 1 : 0,
+ io_info->op_type, &mem_type, &mem_type_is_derived, &file_type, &file_type_is_derived) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "couldn't create MPI type for writing filtered chunks")
- if (H5D__chunk_file_alloc(&index_info, &collective_chunk_list[j].chunk_states.chunk_current,
- &collective_chunk_list[j].chunk_states.new_chunk, &insert,
- chunk_list[j].scaled) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
- } /* end for */
-
- if (NULL ==
- (has_chunk_selected_array = (hbool_t *)H5MM_malloc((size_t)mpi_size * sizeof(hbool_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate num chunks selected array")
-
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Allgather(&have_chunk_to_process, 1, MPI_C_BOOL, has_chunk_selected_array, 1,
- MPI_C_BOOL, io_info->comm)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
+ mpi_buf_count = (file_type_is_derived || mem_type_is_derived) ? 1 : 0;
- /* If this process has a chunk to work on, create a MPI type for the
- * memory and file for writing out the chunk
- */
+ /* Override the write buffer to point to the chunk data buffer */
if (have_chunk_to_process) {
- size_t offset;
- int mpi_type_count;
-
- for (j = 0, offset = 0; j < (size_t)mpi_rank; j++)
- offset += has_chunk_selected_array[j];
-
- /* Collect the new chunk info back to the local copy, since only the record in the
- * collective array gets updated by the chunk re-allocation */
- H5MM_memcpy(&chunk_list[i].chunk_states.new_chunk,
- &collective_chunk_list[offset].chunk_states.new_chunk,
- sizeof(chunk_list[i].chunk_states.new_chunk));
-
- H5_CHECKED_ASSIGN(mpi_type_count, int, chunk_list[i].chunk_states.new_chunk.length, hsize_t);
-
- /* Create MPI memory type for writing to chunk */
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Type_contiguous(mpi_type_count, MPI_BYTE, &mem_type_array[i])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
- if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type_array[i])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- mem_type_is_derived_array[i] = TRUE;
-
- /* Create MPI file type for writing to chunk */
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Type_contiguous(mpi_type_count, MPI_BYTE, &file_type_array[i])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
- if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type_array[i])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- file_type_is_derived_array[i] = TRUE;
-
- mpi_buf_count = 1;
-
- /* Set up the base storage address for this operation */
- ctg_store.contig.dset_addr = chunk_list[i].chunk_states.new_chunk.offset;
-
- /* Override the write buffer to point to the address of the
- * chunk data buffer
+ /*
+ * Override the write buffer to point to the
+ * chunk's data buffer
*/
ctg_io_info.u.wbuf = chunk_list[i].buf;
- } /* end if */
- else {
- mem_type_array[i] = file_type_array[i] = MPI_BYTE;
- mpi_buf_count = 0;
- } /* end else */
+
+ /*
+ * Setup the base storage address for this
+ * operation to be the chunk's file address
+ */
+ ctg_store.contig.dset_addr = chunk_list[i].chunk_new.offset;
+ }
+ else
+ ctg_store.contig.dset_addr = 0;
/* Perform the I/O */
- if (H5D__final_collective_io(&ctg_io_info, type_info, mpi_buf_count, file_type_array[i],
- mem_type_array[i]) < 0)
+ if (H5D__final_collective_io(&ctg_io_info, type_info, mpi_buf_count, file_type, mem_type) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "couldn't finish MPI-IO")
+ /* Free up resources in anticipation of following collective operation */
+ if (have_chunk_to_process && chunk_list[i].buf) {
+ H5MM_free(chunk_list[i].buf);
+ chunk_list[i].buf = NULL;
+ }
+
/* Participate in the collective re-insertion of all chunks modified
* in this iteration into the chunk index
*/
- for (j = 0; j < collective_chunk_list_num_entries; j++) {
- udata.chunk_block = collective_chunk_list[j].chunk_states.new_chunk;
- udata.common.scaled = collective_chunk_list[j].scaled;
- udata.chunk_idx = collective_chunk_list[j].index;
-
- if ((index_info.storage->ops->insert)(&index_info, &udata, io_info->dset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL,
- "unable to insert chunk address into index")
- } /* end for */
+ if (H5D__mpio_collective_filtered_chunk_reinsert(have_chunk_to_process ? &chunk_list[i] : NULL,
+ have_chunk_to_process ? 1 : 0, NULL, io_info,
+ &index_info, mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "couldn't collectively re-insert modified chunks into chunk index")
+
+ /* Free the MPI types, if they were derived */
+ if (mem_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ mem_type_is_derived = FALSE;
+ if (file_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ file_type_is_derived = FALSE;
+ } /* end for */
+ }
- if (collective_chunk_list) {
- H5MM_free(collective_chunk_list);
- collective_chunk_list = NULL;
- } /* end if */
- if (has_chunk_selected_array) {
- H5MM_free(has_chunk_selected_array);
- has_chunk_selected_array = NULL;
- } /* end if */
- } /* end for */
+done:
+ /* Free the MPI buf and file types, if they were derived */
+ if (mem_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if (file_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- /* Free the MPI file and memory types, if they were derived */
- for (i = 0; i < max_num_chunks; i++) {
- if (file_type_is_derived_array[i])
- if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type_array[i])))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if (chunk_msg_bufs) {
+ for (i = 0; i < (size_t)chunk_msg_bufs_len; i++)
+ H5MM_free(chunk_msg_bufs[i]);
- if (mem_type_is_derived_array[i])
- if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type_array[i])))
- HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
- } /* end for */
- } /* end else */
+ H5MM_free(chunk_msg_bufs);
+ }
-done:
+ HASH_CLEAR(hh, chunk_hash_table);
+
+ /* Free resources used by a rank which had some selection */
if (chunk_list) {
for (i = 0; i < chunk_list_num_entries; i++)
if (chunk_list[i].buf)
@@ -2019,16 +2350,10 @@ done:
H5MM_free(chunk_list);
} /* end if */
- if (collective_chunk_list)
- H5MM_free(collective_chunk_list);
- if (file_type_array)
- H5MM_free(file_type_array);
- if (mem_type_array)
- H5MM_free(mem_type_array);
- if (file_type_is_derived_array)
- H5MM_free(file_type_is_derived_array);
- if (mem_type_is_derived_array)
- H5MM_free(mem_type_is_derived_array);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__multi_chunk_filtered_collective_io() */
@@ -2054,11 +2379,22 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
hbool_t mbt_is_derived = FALSE;
hbool_t mft_is_derived = FALSE;
MPI_Datatype mpi_file_type, mpi_buf_type;
- int mpi_code; /* MPI return code */
- herr_t ret_value = SUCCEED; /* return value */
+ int mpi_code; /* MPI return code */
+#ifdef H5Dmpio_DEBUG
+ int mpi_rank;
+#endif
+ herr_t ret_value = SUCCEED; /* return value */
FUNC_ENTER_STATIC
+#ifdef H5Dmpio_DEBUG
+ mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file);
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Inter collective I/O");
+ if (mpi_rank < 0)
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+#endif
+
if ((file_space != NULL) && (mem_space != NULL)) {
int mpi_file_count; /* Number of file "objects" to transfer */
hsize_t *permute_map = NULL; /* array that holds the mapping from the old,
@@ -2117,9 +2453,8 @@ H5D__inter_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
mft_is_derived = FALSE;
} /* end else */
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before final collective IO \n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "before final collective I/O");
#endif
/* Perform final collective I/O operation */
@@ -2133,9 +2468,10 @@ done:
if (mft_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mpi_file_type)))
HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before leaving inter_collective_io ret_value = %d\n", ret_value);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_DEBUG_VA(mpi_rank, "before leaving inter_collective_io ret_value = %d", ret_value);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
#endif
FUNC_LEAVE_NOAPI(ret_value)
@@ -2157,10 +2493,21 @@ static herr_t
H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t mpi_buf_count,
MPI_Datatype mpi_file_type, MPI_Datatype mpi_buf_type)
{
+#ifdef H5Dmpio_DEBUG
+ int mpi_rank;
+#endif
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
+#ifdef H5Dmpio_DEBUG
+ mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file);
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Final collective I/O");
+ if (mpi_rank < 0)
+ HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain MPI rank")
+#endif
+
/* Pass buf type, file type to the file driver. */
if (H5CX_set_mpi_coll_datatypes(mpi_buf_type, mpi_file_type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O collective I/O datatypes")
@@ -2175,10 +2522,12 @@ H5D__final_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
} /* end else */
done:
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "ret_value before leaving final_collective_io=%d\n", ret_value);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_DEBUG_VA(mpi_rank, "ret_value before leaving final_collective_io=%d", ret_value);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
#endif
+
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__final_collective_io */
@@ -2220,62 +2569,149 @@ H5D__cmp_chunk_addr(const void *chunk_addr_info1, const void *chunk_addr_info2)
*
* Return: -1, 0, 1
*
- * Programmer: Jordan Henderson
- * Wednesday, Nov. 30th, 2016
- *
*-------------------------------------------------------------------------
*/
static int
H5D__cmp_filtered_collective_io_info_entry(const void *filtered_collective_io_info_entry1,
const void *filtered_collective_io_info_entry2)
{
- haddr_t addr1 = HADDR_UNDEF, addr2 = HADDR_UNDEF;
+ const H5D_filtered_collective_io_info_t *entry1;
+ const H5D_filtered_collective_io_info_t *entry2;
+ haddr_t addr1 = HADDR_UNDEF;
+ haddr_t addr2 = HADDR_UNDEF;
+ int ret_value;
FUNC_ENTER_STATIC_NOERR
- addr1 = ((const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry1)
- ->chunk_states.new_chunk.offset;
- addr2 = ((const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry2)
- ->chunk_states.new_chunk.offset;
+ entry1 = (const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry1;
+ entry2 = (const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry2;
- FUNC_LEAVE_NOAPI(H5F_addr_cmp(addr1, addr2))
-} /* end H5D__cmp_filtered_collective_io_info_entry() */
+ addr1 = entry1->chunk_new.offset;
+ addr2 = entry2->chunk_new.offset;
+
+ /*
+ * If both chunk addresses are defined, H5F_addr_cmp is safe to use.
+ * Otherwise, if both addresses aren't defined, compared chunk
+ * entries based on their chunk index. Finally, if only one chunk
+ * address is defined, return the appropriate value based on which
+ * is defined.
+ */
+ if (H5F_addr_defined(addr1) && H5F_addr_defined(addr2)) {
+ ret_value = H5F_addr_cmp(addr1, addr2);
+ }
+ else if (!H5F_addr_defined(addr1) && !H5F_addr_defined(addr2)) {
+ hsize_t chunk_idx1 = entry1->index_info.chunk_idx;
+ hsize_t chunk_idx2 = entry2->index_info.chunk_idx;
-#if MPI_VERSION >= 3
+ ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2);
+ }
+ else
+ ret_value = H5F_addr_defined(addr1) ? 1 : -1;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__cmp_filtered_collective_io_info_entry() */
/*-------------------------------------------------------------------------
- * Function: H5D__cmp_filtered_collective_io_info_entry_owner
+ * Function: H5D__cmp_chunk_redistribute_info
*
- * Purpose: Routine to compare filtered collective chunk io info
- * entries's original owner fields
+ * Purpose: Routine to compare two H5D_chunk_redistribute_info_t
+ * structures
*
- * Description: Callback for qsort() to compare filtered collective chunk
- * io info entries's original owner fields
+ * Description: Callback for qsort() to compare two
+ * H5D_chunk_redistribute_info_t structures
+ *
+ * Return: -1, 0, 1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5D__cmp_chunk_redistribute_info(const void *_entry1, const void *_entry2)
+{
+ const H5D_chunk_redistribute_info_t *entry1;
+ const H5D_chunk_redistribute_info_t *entry2;
+ hsize_t chunk_index1;
+ hsize_t chunk_index2;
+ int ret_value;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ entry1 = (const H5D_chunk_redistribute_info_t *)_entry1;
+ entry2 = (const H5D_chunk_redistribute_info_t *)_entry2;
+
+ chunk_index1 = entry1->chunk_idx;
+ chunk_index2 = entry2->chunk_idx;
+
+ if (chunk_index1 == chunk_index2) {
+ int orig_owner1 = entry1->orig_owner;
+ int orig_owner2 = entry2->orig_owner;
+
+ ret_value = (orig_owner1 > orig_owner2) - (orig_owner1 < orig_owner2);
+ }
+ else
+ ret_value = (chunk_index1 > chunk_index2) - (chunk_index1 < chunk_index2);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__cmp_chunk_redistribute_info() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__cmp_chunk_redistribute_info_orig_owner
*
- * Return: The difference between the two
- * H5D_filtered_collective_io_info_t's original owner fields
+ * Purpose: Routine to compare the original owning MPI rank for two
+ * H5D_chunk_redistribute_info_t structures
*
- * Programmer: Jordan Henderson
- * Monday, Apr. 10th, 2017
+ * Description: Callback for qsort() to compare the original owning MPI
+ * rank for two H5D_chunk_redistribute_info_t
+ * structures
+ *
+ * Return: -1, 0, 1
*
*-------------------------------------------------------------------------
*/
static int
-H5D__cmp_filtered_collective_io_info_entry_owner(const void *filtered_collective_io_info_entry1,
- const void *filtered_collective_io_info_entry2)
+H5D__cmp_chunk_redistribute_info_orig_owner(const void *_entry1, const void *_entry2)
{
- int owner1 = -1, owner2 = -1;
+ const H5D_chunk_redistribute_info_t *entry1;
+ const H5D_chunk_redistribute_info_t *entry2;
+ int owner1 = -1;
+ int owner2 = -1;
+ int ret_value;
FUNC_ENTER_STATIC_NOERR
- owner1 = ((const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry1)
- ->owners.original_owner;
- owner2 = ((const H5D_filtered_collective_io_info_t *)filtered_collective_io_info_entry2)
- ->owners.original_owner;
+ entry1 = (const H5D_chunk_redistribute_info_t *)_entry1;
+ entry2 = (const H5D_chunk_redistribute_info_t *)_entry2;
- FUNC_LEAVE_NOAPI(owner1 - owner2)
-} /* end H5D__cmp_filtered_collective_io_info_entry_owner() */
-#endif
+ owner1 = entry1->orig_owner;
+ owner2 = entry2->orig_owner;
+
+ if (owner1 == owner2) {
+ haddr_t addr1 = entry1->chunk_block.offset;
+ haddr_t addr2 = entry2->chunk_block.offset;
+
+ /*
+ * If both chunk addresses are defined, H5F_addr_cmp is safe to use.
+ * Otherwise, if both addresses aren't defined, compared chunk
+ * entries based on their chunk index. Finally, if only one chunk
+ * address is defined, return the appropriate value based on which
+ * is defined.
+ */
+ if (H5F_addr_defined(addr1) && H5F_addr_defined(addr2)) {
+ ret_value = H5F_addr_cmp(addr1, addr2);
+ }
+ else if (!H5F_addr_defined(addr1) && !H5F_addr_defined(addr2)) {
+ hsize_t chunk_idx1 = entry1->chunk_idx;
+ hsize_t chunk_idx2 = entry2->chunk_idx;
+
+ ret_value = (chunk_idx1 > chunk_idx2) - (chunk_idx1 < chunk_idx2);
+ }
+ else
+ ret_value = H5F_addr_defined(addr1) ? 1 : -1;
+ }
+ else
+ ret_value = (owner1 > owner2) - (owner1 < owner2);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__cmp_chunk_redistribute_info_orig_owner() */
/*-------------------------------------------------------------------------
* Function: H5D__sort_chunk
@@ -2304,26 +2740,24 @@ H5D__cmp_filtered_collective_io_info_entry_owner(const void *filtered_collective
*/
static herr_t
H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
- H5D_chunk_addr_info_t chunk_addr_info_array[], int sum_chunk)
+ H5D_chunk_addr_info_t chunk_addr_info_array[], int sum_chunk, int mpi_rank, int mpi_size)
{
- H5SL_node_t * chunk_node; /* Current node in chunk skip list */
- H5D_chunk_info_t *chunk_info; /* Current chunking info. of this node. */
- haddr_t chunk_addr; /* Current chunking address of this node */
- haddr_t *total_chunk_addr_array = NULL; /* The array of chunk address for the total number of chunk */
- hbool_t do_sort = FALSE; /* Whether the addresses need to be sorted */
- int bsearch_coll_chunk_threshold;
- int many_chunk_opt = H5D_OBTAIN_ONE_CHUNK_ADDR_IND;
- int mpi_size; /* Number of MPI processes */
- int mpi_code; /* MPI return code */
- int i; /* Local index variable */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5SL_node_t * chunk_node; /* Current node in chunk skip list */
+ H5D_chunk_info_t *chunk_info; /* Current chunking info. of this node. */
+ haddr_t chunk_addr; /* Current chunking address of this node */
+ haddr_t *total_chunk_addr_array = NULL; /* The array of chunk address for the total number of chunk */
+ H5P_coll_md_read_flag_t md_reads_file_flag;
+ hbool_t md_reads_context_flag;
+ hbool_t restore_md_reads_state = FALSE;
+ hbool_t do_sort = FALSE; /* Whether the addresses need to be sorted */
+ int bsearch_coll_chunk_threshold;
+ int many_chunk_opt = H5D_OBTAIN_ONE_CHUNK_ADDR_IND;
+ int mpi_code; /* MPI return code */
+ int i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
- /* Retrieve # of MPI processes */
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
-
/* Calculate the actual threshold to obtain all chunk addresses collectively
* The bigger this number is, the more possible the use of obtaining chunk
* address collectively.
@@ -2337,31 +2771,56 @@ H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
((sum_chunk / mpi_size) >= H5D_ALL_CHUNK_ADDR_THRES_COL_NUM))
many_chunk_opt = H5D_OBTAIN_ALL_CHUNK_ADDR_COL;
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "many_chunk_opt= %d\n", many_chunk_opt);
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG_VA(mpi_rank, "many_chunk_opt = %d", many_chunk_opt);
#endif
/* If we need to optimize the way to obtain the chunk address */
if (many_chunk_opt != H5D_OBTAIN_ONE_CHUNK_ADDR_IND) {
- int mpi_rank;
-
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL\n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "Coming inside H5D_OBTAIN_ALL_CHUNK_ADDR_COL");
#endif
/* Allocate array for chunk addresses */
if (NULL == (total_chunk_addr_array =
(haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)fm->layout->u.chunk.nchunks)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory chunk address array")
- /* Retrieve all the chunk addresses with process 0 */
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
-
if (mpi_rank == 0) {
- if (H5D__chunk_addrmap(io_info, total_chunk_addr_array) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+ herr_t result;
+
+ /*
+ * If enabled, disable collective metadata reads here.
+ * Since the chunk address mapping is done on rank 0
+ * only here, it will cause problems if collective
+ * metadata reads are enabled.
+ */
+ if (H5F_get_coll_metadata_reads(io_info->dset->oloc.file)) {
+ md_reads_file_flag = H5P_FORCE_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(io_info->dset->oloc.file, &md_reads_file_flag,
+ &md_reads_context_flag);
+ restore_md_reads_state = TRUE;
+ }
+
+ result = H5D__chunk_addrmap(io_info, total_chunk_addr_array);
+
+ /* Ensure that we restore the old collective metadata reads state */
+ if (restore_md_reads_state) {
+ H5F_set_coll_metadata_reads(io_info->dset->oloc.file, &md_reads_file_flag,
+ &md_reads_context_flag);
+ restore_md_reads_state = FALSE;
+ }
+
+ if (result < 0) {
+ size_t u;
+
+ /* Clear total chunk address array */
+ for (u = 0; u < (size_t)fm->layout->u.chunk.nchunks; u++)
+ total_chunk_addr_array[u] = HADDR_UNDEF;
+
+ /* Push error, but still participate in following MPI_Bcast */
+ HDONE_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get chunk address")
+ }
} /* end if */
/* Broadcasting the MPI_IO option info. and chunk address info. */
@@ -2405,10 +2864,10 @@ H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
chunk_node = H5SL_next(chunk_node);
} /* end while */
-#ifdef H5D_DEBUG
- if (H5DEBUG(D))
- HDfprintf(H5DEBUG(D), "before Qsort\n");
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_DEBUG(mpi_rank, "before Qsort");
#endif
+
if (do_sort) {
size_t num_chunks = H5SL_count(fm->sel_chunks);
@@ -2416,6 +2875,10 @@ H5D__sort_chunk(H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
} /* end if */
done:
+ /* Re-enable collective metadata reads if we disabled them */
+ if (restore_md_reads_state)
+ H5F_set_coll_metadata_reads(io_info->dset->oloc.file, &md_reads_file_flag, &md_reads_context_flag);
+
if (total_chunk_addr_array)
H5MM_xfree(total_chunk_addr_array);
@@ -2461,22 +2924,24 @@ done:
*/
static herr_t
H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, uint8_t assign_io_mode[],
- haddr_t chunk_addr[])
+ haddr_t chunk_addr[], int mpi_rank, int mpi_size)
{
- size_t total_chunks;
- unsigned percent_nproc_per_chunk, threshold_nproc_per_chunk;
- uint8_t * io_mode_info = NULL;
- uint8_t * recv_io_mode_info = NULL;
- uint8_t * mergebuf = NULL;
- uint8_t * tempbuf;
- H5SL_node_t * chunk_node;
- H5D_chunk_info_t *chunk_info;
- int mpi_size, mpi_rank;
- MPI_Comm comm;
- int root;
- size_t ic;
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ size_t total_chunks;
+ unsigned percent_nproc_per_chunk, threshold_nproc_per_chunk;
+ uint8_t * io_mode_info = NULL;
+ uint8_t * recv_io_mode_info = NULL;
+ uint8_t * mergebuf = NULL;
+ uint8_t * tempbuf;
+ H5SL_node_t * chunk_node;
+ H5D_chunk_info_t * chunk_info;
+ H5P_coll_md_read_flag_t md_reads_file_flag;
+ hbool_t md_reads_context_flag;
+ hbool_t restore_md_reads_state = FALSE;
+ MPI_Comm comm;
+ int root;
+ size_t ic;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -2484,12 +2949,6 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, uint8_t assig
root = 0;
comm = io_info->comm;
- /* Obtain the number of process and the current rank of the process */
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
-
/* Setup parameters */
H5_CHECKED_ASSIGN(total_chunks, size_t, fm->layout->u.chunk.nchunks, hsize_t);
if (H5CX_get_mpio_chunk_opt_ratio(&percent_nproc_per_chunk) < 0)
@@ -2536,6 +2995,20 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, uint8_t assig
size_t nproc;
unsigned *nproc_per_chunk;
+ /*
+ * If enabled, disable collective metadata reads here.
+ * Since the chunk address mapping is done on rank 0
+ * only here, it will cause problems if collective
+ * metadata reads are enabled.
+ */
+ if (H5F_get_coll_metadata_reads(io_info->dset->oloc.file)) {
+ md_reads_file_flag = H5P_FORCE_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(io_info->dset->oloc.file, &md_reads_file_flag,
+ &md_reads_context_flag);
+ restore_md_reads_state = TRUE;
+ }
+
/* pre-computing: calculate number of processes and
regularity of the selection occupied in each chunk */
if (NULL == (nproc_per_chunk = (unsigned *)H5MM_calloc(total_chunks * sizeof(unsigned))))
@@ -2602,6 +3075,10 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_chunk_map_t *fm, uint8_t assig
#endif
done:
+ /* Re-enable collective metadata reads if we disabled them */
+ if (restore_md_reads_state)
+ H5F_set_coll_metadata_reads(io_info->dset->oloc.file, &md_reads_file_flag, &md_reads_context_flag);
+
if (io_mode_info)
H5MM_free(io_mode_info);
if (mergebuf)
@@ -2615,34 +3092,32 @@ done:
} /* end H5D__obtain_mpio_mode() */
/*-------------------------------------------------------------------------
- * Function: H5D__construct_filtered_io_info_list
+ * Function: H5D__mpio_collective_filtered_chunk_io_setup
*
* Purpose: Constructs a list of entries which contain the necessary
* information for inter-process communication when performing
* collective io on filtered chunks. This list is used by
- * each process when performing I/O on locally selected chunks
- * and also in operations that must be collectively done
- * on every chunk, such as chunk re-allocation, insertion of
- * chunks into the chunk index, etc.
+ * each MPI rank when performing I/O on locally selected
+ * chunks and also in operations that must be collectively
+ * done on every chunk, such as chunk re-allocation, insertion
+ * of chunks into the chunk index, etc.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Tuesday, January 10th, 2017
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- const H5D_chunk_map_t * fm,
- H5D_filtered_collective_io_info_t **chunk_list, size_t *num_entries)
+H5D__mpio_collective_filtered_chunk_io_setup(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
+ const H5D_chunk_map_t * fm,
+ H5D_filtered_collective_io_info_t **chunk_list,
+ size_t *num_entries, int mpi_rank)
{
- H5D_filtered_collective_io_info_t *local_info_array =
- NULL; /* The list of initially selected chunks for this process */
- size_t num_chunks_selected;
- size_t i;
- int mpi_rank;
- herr_t ret_value = SUCCEED;
+ H5D_filtered_collective_io_info_t *local_info_array = NULL;
+ H5D_chunk_ud_t udata;
+ hbool_t filter_partial_edge_chunks;
+ size_t num_chunks_selected;
+ size_t i;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -2652,19 +3127,23 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ
HDassert(chunk_list);
HDassert(num_entries);
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Filtered Collective I/O Setup");
+#endif
- /* Each process builds a local list of the chunks they have selected */
+ /* Each rank builds a local list of the chunks they have selected */
if ((num_chunks_selected = H5SL_count(fm->sel_chunks))) {
H5D_chunk_info_t *chunk_info;
- H5D_chunk_ud_t udata;
H5SL_node_t * chunk_node;
hsize_t select_npoints;
- hssize_t chunk_npoints;
+ hbool_t need_sort = FALSE;
- if (NULL == (local_info_array = (H5D_filtered_collective_io_info_t *)H5MM_malloc(
- num_chunks_selected * sizeof(H5D_filtered_collective_io_info_t))))
+ /* Determine whether partial edge chunks should be filtered */
+ filter_partial_edge_chunks = !(io_info->dset->shared->layout.u.chunk.flags &
+ H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS);
+
+ if (NULL == (local_info_array = H5MM_malloc(num_chunks_selected * sizeof(*local_info_array))))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate local io info array buffer")
chunk_node = H5SL_first(fm->sel_chunks);
@@ -2675,275 +3154,787 @@ H5D__construct_filtered_io_info_list(const H5D_io_info_t *io_info, const H5D_typ
if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
- local_info_array[i].index = chunk_info->index;
- local_info_array[i].chunk_states.chunk_current = local_info_array[i].chunk_states.new_chunk =
- udata.chunk_block;
- local_info_array[i].num_writers = 0;
- local_info_array[i].owners.original_owner = local_info_array[i].owners.new_owner = mpi_rank;
- local_info_array[i].buf = NULL;
-
- local_info_array[i].async_info.num_receive_requests = 0;
- local_info_array[i].async_info.receive_buffer_array = NULL;
- local_info_array[i].async_info.receive_requests_array = NULL;
-
- H5MM_memcpy(local_info_array[i].scaled, chunk_info->scaled, sizeof(chunk_info->scaled));
-
- select_npoints = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
- local_info_array[i].io_size = (size_t)select_npoints * type_info->src_type_size;
-
- /* Currently the full overwrite status of a chunk is only obtained on a per-process
- * basis. This means that if the total selection in the chunk, as determined by the combination
- * of selections of all of the processes interested in the chunk, covers the entire chunk,
- * the performance optimization of not reading the chunk from the file is still valid, but
- * is not applied in the current implementation. Something like an appropriately placed
- * MPI_Allreduce or a running total of the number of chunk points selected during chunk
- * redistribution should suffice for implementing this case - JTH.
+ /* Initialize rank-local chunk info */
+ local_info_array[i].chunk_info = chunk_info;
+ local_info_array[i].chunk_buf_size = 0;
+ local_info_array[i].num_writers = 0;
+ local_info_array[i].orig_owner = mpi_rank;
+ local_info_array[i].new_owner = mpi_rank;
+ local_info_array[i].buf = NULL;
+
+ select_npoints = H5S_GET_SELECT_NPOINTS(chunk_info->fspace);
+ local_info_array[i].io_size = (size_t)select_npoints * type_info->dst_type_size;
+
+ /*
+ * Determine whether this chunk will need to be read from the file. If this is
+ * a read operation, the chunk will be read. If this is a write operation, we
+ * generally need to read a filtered chunk from the file before modifying it,
+ * unless the chunk is being fully overwritten.
+ *
+ * TODO: Currently the full overwrite status of a chunk is only obtained on a
+ * per-rank basis. This means that if the total selection in the chunk, as
+ * determined by the combination of selections of all of the ranks interested in
+ * the chunk, covers the entire chunk, the performance optimization of not reading
+ * the chunk from the file is still valid, but is not applied in the current
+ * implementation.
+ *
+ * To implement this case, a few approaches were considered:
+ *
+ * - Keep a running total (distributed to each rank) of the number of chunk
+ * elements selected during chunk redistribution and compare that to the total
+ * number of elements in the chunk once redistribution is finished
+ *
+ * - Process all incoming chunk messages before doing I/O (these are currently
+ * processed AFTER doing I/O), combine the owning rank's selection in a chunk
+ * with the selections received from other ranks and check to see whether that
+ * combined selection covers the entire chunk
+ *
+ * The first approach will be dangerous if the application performs an overlapping
+ * write to a chunk, as the number of selected elements can equal or exceed the
+ * number of elements in the chunk without the whole chunk selection being covered.
+ * While it might be considered erroneous for an application to do an overlapping
+ * write, we don't explicitly disallow it.
+ *
+ * The second approach contains a bit of complexity in that part of the chunk
+ * messages will be needed before doing I/O and part will be needed after doing I/O.
+ * Since modification data from chunk messages can't be applied until after any I/O
+ * is performed (otherwise, we'll overwrite any applied modification data), chunk
+ * messages are currently entirely processed after I/O. However, in order to determine
+ * if a chunk is being fully overwritten, we need the dataspace portion of the chunk
+ * messages before doing I/O. The naive way to do this is to process chunk messages
+ * twice, using just the relevant information from the message before and after I/O.
+ * The better way would be to avoid processing chunk messages twice by extracting (and
+ * keeping around) the dataspace portion of the message before I/O and processing the
+ * rest of the chunk message after I/O. Note that the dataspace portion of each chunk
+ * message is used to correctly apply chunk modification data from the message, so
+ * must be kept around both before and after I/O in this case.
+ */
+ if (io_info->op_type == H5D_IO_OP_READ)
+ local_info_array[i].need_read = TRUE;
+ else {
+ local_info_array[i].need_read =
+ local_info_array[i].io_size < (size_t)io_info->dset->shared->layout.u.chunk.size;
+ }
+
+ local_info_array[i].skip_filter_pline = FALSE;
+ if (!filter_partial_edge_chunks) {
+ /*
+ * If this is a partial edge chunk and the "don't filter partial edge
+ * chunks" flag is set, make sure not to apply filters to the chunk.
+ */
+ if (H5D__chunk_is_partial_edge_chunk(io_info->dset->shared->ndims,
+ io_info->dset->shared->layout.u.chunk.dim,
+ chunk_info->scaled, io_info->dset->shared->curr_dims))
+ local_info_array[i].skip_filter_pline = TRUE;
+ }
+
+ /* Initialize the chunk's shared info */
+ local_info_array[i].chunk_current = udata.chunk_block;
+ local_info_array[i].chunk_new = udata.chunk_block;
+
+ /*
+ * Check if the list is not in ascending order of offset in the file
+ * or has unallocated chunks. In either case, the list should get
+ * sorted.
+ */
+ if (i) {
+ haddr_t curr_chunk_offset = local_info_array[i].chunk_current.offset;
+ haddr_t prev_chunk_offset = local_info_array[i - 1].chunk_current.offset;
+
+ if (!H5F_addr_defined(prev_chunk_offset) || !H5F_addr_defined(curr_chunk_offset) ||
+ (curr_chunk_offset < prev_chunk_offset))
+ need_sort = TRUE;
+ }
+
+ /*
+ * Extensible arrays may calculate a chunk's index a little differently
+ * than normal when the dataset's unlimited dimension is not the
+ * slowest-changing dimension, so set the index here based on what the
+ * extensible array code calculated instead of what was calculated
+ * in the chunk file mapping.
*/
- if ((chunk_npoints = H5S_GET_EXTENT_NPOINTS(chunk_info->fspace)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
- local_info_array[i].full_overwrite =
- (local_info_array[i].io_size >= (hsize_t)chunk_npoints * type_info->dst_type_size) ? TRUE
- : FALSE;
+ if (io_info->dset->shared->layout.u.chunk.idx_type == H5D_CHUNK_IDX_EARRAY)
+ local_info_array[i].index_info.chunk_idx = udata.chunk_idx;
+ else
+ local_info_array[i].index_info.chunk_idx = chunk_info->index;
+
+ local_info_array[i].index_info.filter_mask = udata.filter_mask;
+ local_info_array[i].index_info.need_insert = FALSE;
chunk_node = H5SL_next(chunk_node);
- } /* end for */
- } /* end if */
-
- /* Redistribute shared chunks to new owners as necessary */
- if (io_info->op_type == H5D_IO_OP_WRITE)
-#if MPI_VERSION >= 3
- if (H5D__chunk_redistribute_shared_chunks(io_info, type_info, fm, local_info_array,
- &num_chunks_selected) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to redistribute shared chunks")
-#else
- HGOTO_ERROR(
- H5E_DATASET, H5E_WRITEERROR, FAIL,
- "unable to redistribute shared chunks - MPI version < 3 (MPI_Mprobe and MPI_Imrecv missing)")
+ }
+
+ /* Ensure the chunk list is sorted in ascending order of offset in the file */
+ if (need_sort)
+ HDqsort(local_info_array, num_chunks_selected, sizeof(H5D_filtered_collective_io_info_t),
+ H5D__cmp_filtered_collective_io_info_entry);
+
+#ifdef H5Dmpio_DEBUG
+ H5D__mpio_dump_collective_filtered_chunk_list(local_info_array, num_chunks_selected, mpi_rank);
#endif
+ }
+ else if (H5F_get_coll_metadata_reads(io_info->dset->oloc.file)) {
+ hsize_t scaled[H5O_LAYOUT_NDIMS] = {0};
+
+ /*
+ * If this rank has no selection in the dataset and collective
+ * metadata reads are enabled, do a fake lookup of a chunk to
+ * ensure that this rank has the chunk index opened. Otherwise,
+ * only the ranks that had a selection will have opened the
+ * chunk index and they will have done so independently. Therefore,
+ * when ranks with no selection participate in later collective
+ * metadata reads, they will try to open the chunk index collectively
+ * and issues will occur since other ranks won't participate.
+ *
+ * In the future, we should consider having a chunk index "open"
+ * callback that can be used to ensure collectivity between ranks
+ * in a more natural way, but this hack should suffice for now.
+ */
+ if (H5D__chunk_lookup(io_info->dset, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ }
*chunk_list = local_info_array;
*num_entries = num_chunks_selected;
done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__construct_filtered_io_info_list() */
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
-#if MPI_VERSION >= 3
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_collective_filtered_chunk_io_setup() */
/*-------------------------------------------------------------------------
- * Function: H5D__chunk_redistribute_shared_chunks
- *
- * Purpose: When performing a collective write on a Dataset with
- * filters applied, this function is used to redistribute any
- * chunks which are selected by more than one process, so as
- * to preserve file integrity after the write by ensuring
- * that any shared chunks are only modified by one process.
- *
- * The current implementation follows this 3-phase process:
- *
- * - Collect everyone's list of chunks into one large list,
- * sort the list in increasing order of chunk offset in the
- * file and hand the list off to rank 0
- *
- * - Rank 0 scans the list looking for matching runs of chunk
- * offset in the file (corresponding to a shared chunk which
- * has been selected by more than one rank in the I/O
- * operation) and for each shared chunk, it redistributes
- * the chunk to the process writing to the chunk which
- * currently has the least amount of chunks assigned to it
- * by modifying the "new_owner" field in each of the list
- * entries corresponding to that chunk
- *
- * - After the chunks have been redistributed, rank 0 re-sorts
- * the list in order of previous owner so that each rank
- * will get back exactly the array that they contributed to
- * the redistribution operation, with the "new_owner" field
- * of each chunk they are modifying having possibly been
- * modified. Rank 0 then scatters each segment of the list
- * back to its corresponding rank
+ * Function: H5D__mpio_redistribute_shared_chunks
+ *
+ * Purpose: When performing a parallel write on a chunked Dataset with
+ * filters applied, we must ensure that any particular chunk
+ * is only written to by a single MPI rank in order to avoid
+ * potential data races on the chunk. This function is used to
+ * redistribute (by assigning ownership to a single rank) any
+ * chunks which are selected by more than one MPI rank.
+ *
+ * An initial Allgather is performed to determine how many
+ * chunks each rank has selected in the write operation and
+ * then that number is compared against a threshold value to
+ * determine whether chunk redistribution should be done on
+ * MPI rank 0 only, or on all MPI ranks.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Monday, May 1, 2017
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_redistribute_shared_chunks(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries, const H5D_io_info_t *io_info,
+ const H5D_chunk_map_t *fm, int mpi_rank, int mpi_size,
+ size_t **rank_chunks_assigned_map)
+{
+ hbool_t redistribute_on_all_ranks;
+ size_t *num_chunks_map = NULL;
+ size_t coll_chunk_list_size = 0;
+ size_t i;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(chunk_list || 0 == chunk_list_num_entries);
+ HDassert(io_info);
+ HDassert(fm);
+ HDassert(mpi_size > 1); /* No chunk sharing is possible for MPI Comm size of 1 */
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Redistribute shared chunks");
+#endif
+
+ /*
+ * Allocate an array for each rank to keep track of the number of
+ * chunks assigned to any other rank in order to cut down on future
+ * MPI communication.
+ */
+ if (NULL == (num_chunks_map = H5MM_malloc((size_t)mpi_size * sizeof(*num_chunks_map))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate assigned chunks array")
+
+ /* Perform initial Allgather to determine the collective chunk list size */
+ if (MPI_SUCCESS != (mpi_code = MPI_Allgather(&chunk_list_num_entries, 1, H5_SIZE_T_AS_MPI_TYPE,
+ num_chunks_map, 1, H5_SIZE_T_AS_MPI_TYPE, io_info->comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
+
+ for (i = 0; i < (size_t)mpi_size; i++)
+ coll_chunk_list_size += num_chunks_map[i];
+
+ /*
+ * Determine whether we should perform chunk redistribution on all
+ * ranks or just rank 0. For a relatively small number of chunks,
+ * we redistribute on all ranks to cut down on MPI communication
+ * overhead. For a larger number of chunks, we redistribute on
+ * rank 0 only to cut down on memory usage.
+ */
+ redistribute_on_all_ranks = coll_chunk_list_size < H5D_CHUNK_REDISTRIBUTE_THRES;
+
+ if (H5D__mpio_redistribute_shared_chunks_int(chunk_list, num_chunks_map, redistribute_on_all_ranks,
+ io_info, fm, mpi_rank, mpi_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREDISTRIBUTE, FAIL, "can't redistribute shared chunks")
+
+ /*
+ * If the caller provided a pointer for the mapping from
+ * rank value -> number of chunks assigned, return that
+ * mapping here.
+ */
+ if (rank_chunks_assigned_map) {
+ /*
+ * If we performed chunk redistribution on rank 0 only, distribute
+ * the rank value -> number of chunks assigned mapping back to all
+ * ranks.
+ */
+ if (!redistribute_on_all_ranks) {
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Bcast(num_chunks_map, mpi_size, H5_SIZE_T_AS_MPI_TYPE, 0, io_info->comm)))
+ HMPI_GOTO_ERROR(FAIL, "couldn't broadcast chunk mapping to other ranks", mpi_code)
+ }
+
+ *rank_chunks_assigned_map = num_chunks_map;
+ }
+
+done:
+ if (!rank_chunks_assigned_map || (ret_value < 0)) {
+ num_chunks_map = H5MM_xfree(num_chunks_map);
+ }
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_redistribute_shared_chunks() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_redistribute_shared_chunks_int
+ *
+ * Purpose: Routine to perform redistribution of shared chunks during
+ * parallel writes to datasets with filters applied.
+ *
+ * If `all_ranks_involved` is TRUE, chunk redistribution
+ * occurs on all MPI ranks. This is usually done when there
+ * is a relatively small number of chunks involved in order to
+ * cut down on MPI communication overhead while increasing
+ * total memory usage a bit.
+ *
+ * If `all_ranks_involved` is FALSE, only rank 0 will perform
+ * chunk redistribution. This is usually done when there is
+ * a relatively large number of chunks involved in order to
+ * cut down on total memory usage at the cost of increased
+ * overhead from MPI communication.
+ *
+ * This implementation is as follows:
+ *
+ * - All MPI ranks send their list of selected chunks to the
+ * ranks involved in chunk redistribution. Then, the
+ * involved ranks sort this new list in order of chunk
+ * index.
+ *
+ * - The involved ranks scan the list looking for matching
+ * runs of chunk index values (corresponding to a shared
+ * chunk which has been selected by more than one rank in
+ * the I/O operation) and for each shared chunk,
+ * redistribute the chunk to the MPI rank writing to the
+ * chunk which currently has the least amount of chunks
+ * assigned to it. This is done by modifying the "new_owner"
+ * field in each of the list entries corresponding to that
+ * chunk. The involved ranks then re-sort the list in order
+ * of original chunk owner so that each rank's section of
+ * contributed chunks is contiguous in the collective chunk
+ * list.
+ *
+ * - If chunk redistribution occurred on all ranks, each rank
+ * scans through the collective chunk list to find their
+ * contributed section of chunks and uses that to update
+ * their local chunk list with the newly-updated "new_owner"
+ * and "num_writers" fields. If chunk redistribution
+ * occurred only on rank 0, an MPI_Scatterv operation will
+ * be used to scatter the segments of the collective chunk
+ * list from rank 0 back to the corresponding ranks.
+ *
+ * Return: Non-negative on success/Negative on failure
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- const H5D_chunk_map_t * fm,
- H5D_filtered_collective_io_info_t *local_chunk_array,
- size_t * local_chunk_array_num_entries)
+H5D__mpio_redistribute_shared_chunks_int(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t *num_chunks_assigned_map, hbool_t all_ranks_involved,
+ const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
+ int mpi_rank, int mpi_size)
{
- H5D_filtered_collective_io_info_t *shared_chunks_info_array =
- NULL; /* The list of all chunks selected in the operation by all processes */
- H5S_sel_iter_t *mem_iter = NULL; /* Memory iterator for H5D__gather_mem */
- unsigned char **mod_data =
- NULL; /* Array of chunk modification data buffers sent by a process to new chunk owners */
- MPI_Request *send_requests = NULL; /* Array of MPI_Isend chunk modification data send requests */
- MPI_Status * send_statuses = NULL; /* Array of MPI_Isend chunk modification send statuses */
- hbool_t mem_iter_init = FALSE;
- size_t shared_chunks_info_array_num_entries = 0;
- size_t num_send_requests = 0;
- size_t * num_assigned_chunks_array = NULL;
- size_t i, last_assigned_idx;
- int * send_counts = NULL;
- int * send_displacements = NULL;
- int scatter_recvcount_int;
- int mpi_rank, mpi_size, mpi_code;
+ MPI_Datatype struct_type;
+ MPI_Datatype packed_type;
+ hbool_t struct_type_derived = FALSE;
+ hbool_t packed_type_derived = FALSE;
+ size_t i;
+ size_t coll_chunk_list_num_entries = 0;
+ void * coll_chunk_list = NULL;
+ int * counts_disps_array = NULL;
+ int * counts_ptr = NULL;
+ int * displacements_ptr = NULL;
+ int num_chunks_int;
+ int mpi_code;
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
+ HDassert(num_chunks_assigned_map);
+ HDassert(chunk_list || 0 == num_chunks_assigned_map[mpi_rank]);
HDassert(io_info);
- HDassert(type_info);
HDassert(fm);
- HDassert(local_chunk_array_num_entries);
+ HDassert(mpi_size > 1);
- if ((mpi_rank = H5F_mpi_get_rank(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi rank")
- if ((mpi_size = H5F_mpi_get_size(io_info->dset->oloc.file)) < 0)
- HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Redistribute shared chunks (internal)");
+#endif
- /* Set to latest format for encoding dataspace */
- H5CX_set_libver_bounds(NULL);
+ /*
+ * Make sure it's safe to cast this rank's number
+ * of chunks to be sent into an int for MPI
+ */
+ H5_CHECKED_ASSIGN(num_chunks_int, int, num_chunks_assigned_map[mpi_rank], size_t);
- if (*local_chunk_array_num_entries)
- if (NULL == (send_requests =
- (MPI_Request *)H5MM_malloc(*local_chunk_array_num_entries * sizeof(MPI_Request))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate send requests buffer")
+ /*
+ * Phase 1 - Participate in collective gathering of every rank's
+ * list of chunks to the ranks which are performing the redistribution
+ * operation.
+ */
- if (NULL == (mem_iter = (H5S_sel_iter_t *)H5MM_malloc(sizeof(H5S_sel_iter_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator")
+ if (all_ranks_involved || (mpi_rank == 0)) {
+ /*
+ * Allocate array to store the receive counts of each rank, as well as
+ * the displacements into the final array where each rank will place
+ * their data. The first half of the array contains the receive counts
+ * (in rank order), while the latter half contains the displacements
+ * (also in rank order).
+ */
+ if (NULL == (counts_disps_array = H5MM_malloc(2 * (size_t)mpi_size * sizeof(*counts_disps_array)))) {
+ /* Push an error, but still participate in collective gather operation */
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "couldn't allocate receive counts and displacements array")
+ }
+ else {
+ /* Set the receive counts from the assigned chunks map */
+ counts_ptr = counts_disps_array;
+
+ for (i = 0; i < (size_t)mpi_size; i++)
+ H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t);
+
+ /* Set the displacements into the receive buffer for the gather operation */
+ displacements_ptr = &counts_disps_array[mpi_size];
+
+ *displacements_ptr = 0;
+ for (i = 1; i < (size_t)mpi_size; i++)
+ displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1];
+ }
+ }
- /* Gather every rank's list of chunks to rank 0 to allow it to perform the redistribution operation. After
- * this call, the gathered list will initially be sorted in increasing order of chunk offset in the file.
+ /*
+ * Construct MPI derived types for extracting information
+ * necessary for MPI communication
*/
- if (H5D__mpio_array_gatherv(local_chunk_array, *local_chunk_array_num_entries,
- sizeof(H5D_filtered_collective_io_info_t), (void **)&shared_chunks_info_array,
- &shared_chunks_info_array_num_entries, false, 0, io_info->comm,
- H5D__cmp_filtered_collective_io_info_entry) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "couldn't gather array")
+ if (H5D__mpio_get_chunk_redistribute_info_types(&packed_type, &packed_type_derived, &struct_type,
+ &struct_type_derived) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+ "can't create derived datatypes for chunk redistribution info")
+
+ /* Perform gather operation */
+ if (H5_mpio_gatherv_alloc(chunk_list, num_chunks_int, struct_type, counts_ptr, displacements_ptr,
+ packed_type, all_ranks_involved, 0, io_info->comm, mpi_rank, mpi_size,
+ &coll_chunk_list, &coll_chunk_list_num_entries) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
+ "can't gather chunk redistribution info to involved ranks")
- /* Rank 0 redistributes any shared chunks to new owners as necessary */
- if (mpi_rank == 0) {
- if (NULL == (send_counts = (int *)H5MM_calloc((size_t)mpi_size * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate send counts buffer")
+ /*
+ * If all ranks are redistributing shared chunks, we no
+ * longer need the receive counts and displacements array
+ */
+ if (all_ranks_involved) {
+ counts_disps_array = H5MM_xfree(counts_disps_array);
+ }
- if (NULL == (send_displacements = (int *)H5MM_malloc((size_t)mpi_size * sizeof(int))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate send displacements buffer")
+ /*
+ * Phase 2 - Involved ranks now redistribute any shared chunks to new
+ * owners as necessary.
+ */
- if (NULL == (num_assigned_chunks_array = (size_t *)H5MM_calloc((size_t)mpi_size * sizeof(size_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "unable to allocate number of assigned chunks array")
+ if (all_ranks_involved || (mpi_rank == 0)) {
+ H5D_chunk_redistribute_info_t *chunk_entry;
+ hsize_t curr_chunk_idx;
+ size_t set_begin_index;
+ int num_writers;
+ int new_chunk_owner;
- for (i = 0; i < shared_chunks_info_array_num_entries;) {
- H5D_filtered_collective_io_info_t *chunk_entry;
- haddr_t last_seen_addr = shared_chunks_info_array[i].chunk_states.chunk_current.offset;
- size_t set_begin_index = i;
- size_t num_writers = 0;
- int new_chunk_owner = shared_chunks_info_array[i].owners.original_owner;
+ /* Clear the mapping from rank value -> number of assigned chunks */
+ HDmemset(num_chunks_assigned_map, 0, (size_t)mpi_size * sizeof(*num_chunks_assigned_map));
- /* Process each set of duplicate entries caused by another process writing to the same chunk */
- do {
- chunk_entry = &shared_chunks_info_array[i];
+ /* Sort collective chunk list according to chunk index */
+ HDqsort(coll_chunk_list, coll_chunk_list_num_entries, sizeof(H5D_chunk_redistribute_info_t),
+ H5D__cmp_chunk_redistribute_info);
- send_counts[chunk_entry->owners.original_owner] += (int)sizeof(*chunk_entry);
+ /*
+ * Process all chunks in the collective chunk list.
+ * Note that the loop counter is incremented by both
+ * the outer loop (while processing each entry in
+ * the collective chunk list) and the inner loop
+ * (while processing duplicate entries for shared
+ * chunks).
+ */
+ chunk_entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[0];
+ for (i = 0; i < coll_chunk_list_num_entries;) {
+ /* Set chunk's initial new owner to its original owner */
+ new_chunk_owner = chunk_entry->orig_owner;
+
+ /*
+ * Set the current chunk index so we know when we've processed
+ * all duplicate entries for a particular shared chunk
+ */
+ curr_chunk_idx = chunk_entry->chunk_idx;
+
+ /* Reset the initial number of writers to this chunk */
+ num_writers = 0;
+
+ /* Set index for the beginning of this section of duplicate chunk entries */
+ set_begin_index = i;
- /* The new owner of the chunk is determined by the process
+ /*
+ * Process each chunk entry in the set for the current
+ * (possibly shared) chunk and increment the loop counter
+ * while doing so.
+ */
+ do {
+ /*
+ * The new owner of the chunk is determined by the rank
* writing to the chunk which currently has the least amount
* of chunks assigned to it
*/
- if (num_assigned_chunks_array[chunk_entry->owners.original_owner] <
- num_assigned_chunks_array[new_chunk_owner])
- new_chunk_owner = chunk_entry->owners.original_owner;
+ if (num_chunks_assigned_map[chunk_entry->orig_owner] <
+ num_chunks_assigned_map[new_chunk_owner])
+ new_chunk_owner = chunk_entry->orig_owner;
+ /* Update the number of writers to this particular chunk */
num_writers++;
- } while (++i < shared_chunks_info_array_num_entries &&
- shared_chunks_info_array[i].chunk_states.chunk_current.offset == last_seen_addr);
- /* Set all of the chunk entries' "new_owner" fields */
+ chunk_entry++;
+ } while (++i < coll_chunk_list_num_entries && chunk_entry->chunk_idx == curr_chunk_idx);
+
+ /* We should never have more writers to a chunk than the number of MPI ranks */
+ HDassert(num_writers <= mpi_size);
+
+ /* Set all processed chunk entries' "new_owner" and "num_writers" fields */
for (; set_begin_index < i; set_begin_index++) {
- shared_chunks_info_array[set_begin_index].owners.new_owner = new_chunk_owner;
- shared_chunks_info_array[set_begin_index].num_writers = num_writers;
- } /* end for */
+ H5D_chunk_redistribute_info_t *entry;
- num_assigned_chunks_array[new_chunk_owner]++;
- } /* end for */
+ entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[set_begin_index];
- /* Sort the new list in order of previous owner so that each original owner of a chunk
- * entry gets that entry back, with the possibly newly-modified "new_owner" field
+ entry->new_owner = new_chunk_owner;
+ entry->num_writers = num_writers;
+ }
+
+ /* Update the number of chunks assigned to the MPI rank that now owns this chunk */
+ num_chunks_assigned_map[new_chunk_owner]++;
+ }
+
+ /*
+ * Re-sort the collective chunk list in order of original chunk owner
+ * so that each rank's section of contributed chunks is contiguous in
+ * the collective chunk list.
+ *
+ * NOTE: this re-sort is frail in that it needs to sort the collective
+ * chunk list so that each rank's section of contributed chunks
+ * is in the exact order it was contributed in, or things will
+ * be scrambled when each rank's local chunk list is updated.
+ * Therefore, the sorting algorithm here is tied to the one
+ * used during the I/O setup operation. Specifically, chunks
+ * are first sorted by ascending order of offset in the file and
+ * then by chunk index. In the future, a better redistribution
+ * algorithm may be devised that doesn't rely on frail sorting,
+ * but the current implementation is a quick and naive approach.
*/
- if (shared_chunks_info_array_num_entries > 1)
- HDqsort(shared_chunks_info_array, shared_chunks_info_array_num_entries,
- sizeof(H5D_filtered_collective_io_info_t),
- H5D__cmp_filtered_collective_io_info_entry_owner);
-
- send_displacements[0] = 0;
- for (i = 1; i < (size_t)mpi_size; i++)
- send_displacements[i] = send_displacements[i - 1] + send_counts[i - 1];
- } /* end if */
+ HDqsort(coll_chunk_list, coll_chunk_list_num_entries, sizeof(H5D_chunk_redistribute_info_t),
+ H5D__cmp_chunk_redistribute_info_orig_owner);
+ }
- /* Scatter the segments of the list back to each process */
- H5_CHECKED_ASSIGN(scatter_recvcount_int, int,
- *local_chunk_array_num_entries * sizeof(H5D_filtered_collective_io_info_t), size_t);
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Scatterv(shared_chunks_info_array, send_counts, send_displacements, MPI_BYTE,
- local_chunk_array, scatter_recvcount_int, MPI_BYTE, 0, io_info->comm)))
- HMPI_GOTO_ERROR(FAIL, "unable to scatter shared chunks info buffer", mpi_code)
+ if (all_ranks_involved) {
+ /*
+ * If redistribution occurred on all ranks, search for the section
+ * in the collective chunk list corresponding to this rank's locally
+ * selected chunks and update the local list after redistribution.
+ */
+ for (i = 0; i < coll_chunk_list_num_entries; i++)
+ if (mpi_rank == ((H5D_chunk_redistribute_info_t *)coll_chunk_list)[i].orig_owner)
+ break;
- if (shared_chunks_info_array) {
- H5MM_free(shared_chunks_info_array);
- shared_chunks_info_array = NULL;
- } /* end if */
+ for (size_t j = 0; j < (size_t)num_chunks_int; j++) {
+ H5D_chunk_redistribute_info_t *coll_entry;
- /* Now that the chunks have been redistributed, each process must send its modification data
- * to the new owners of any of the chunks it previously possessed. Accordingly, each process
- * must also issue asynchronous receives for any messages it may receive for each of the
- * chunks it is assigned, in order to avoid potential deadlocking issues.
+ coll_entry = &((H5D_chunk_redistribute_info_t *)coll_chunk_list)[i++];
+
+ chunk_list[j].new_owner = coll_entry->new_owner;
+ chunk_list[j].num_writers = coll_entry->num_writers;
+ }
+ }
+ else {
+ /*
+ * If redistribution occurred only on rank 0, scatter the segments
+ * of the collective chunk list back to each rank so that their
+ * local chunk lists get updated
+ */
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Scatterv(coll_chunk_list, counts_ptr, displacements_ptr, packed_type, chunk_list,
+ num_chunks_int, struct_type, 0, io_info->comm)))
+ HMPI_GOTO_ERROR(FAIL, "unable to scatter shared chunks info buffer", mpi_code)
+ }
+
+#ifdef H5Dmpio_DEBUG
+ H5D__mpio_dump_collective_filtered_chunk_list(chunk_list, num_chunks_assigned_map[mpi_rank], mpi_rank);
+#endif
+
+done:
+ H5MM_free(coll_chunk_list);
+
+ if (struct_type_derived) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&struct_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ }
+ if (packed_type_derived) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&packed_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ }
+
+ H5MM_free(counts_disps_array);
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_redistribute_shared_chunks_int() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_share_chunk_modification_data
+ *
+ * Purpose: When performing a parallel write on a chunked dataset with
+ * filters applied, we must first ensure that any particular
+ * chunk is only written to by a single MPI rank in order to
+ * avoid potential data races on the chunk. Once dataset
+ * chunks have been redistributed in a suitable manner, each
+ * MPI rank must send its chunk data to other ranks for each
+ * chunk it no longer owns.
+ *
+ * The current implementation here follows the Nonblocking
+ * Consensus algorithm described in:
+ * http://unixer.de/publications/img/hoefler-dsde-protocols.pdf
+ *
+ * First, each MPI rank scans through its list of selected
+ * chunks and does the following for each chunk:
+ *
+ * * If a chunk in the MPI rank's chunk list is still owned
+ * by that rank, the rank checks how many messages are
+ * incoming for that chunk and adds that to its running
+ * total. Then, the rank updates its local chunk list so
+ * that any previous chunk entries for chunks that are no
+ * longer owned by the rank get overwritten by chunk
+ * entries for chunks the rank still owns. Since the data
+ * for the chunks no longer owned will have already been
+ * sent, those chunks can effectively be discarded.
+ * * If a chunk in the MPI rank's chunk list is no longer
+ * owned by that rank, the rank sends the data it wishes to
+ * update the chunk with to the MPI rank that now has
+ * ownership of that chunk. To do this, it encodes the
+ * chunk's index, its selection in the chunk and its
+ * modification data into a buffer and then posts a
+ * non-blocking MPI_Issend to the owning rank.
+ *
+ * Once this step is complete, all MPI ranks allocate arrays
+ * to hold chunk message receive buffers and MPI request
+ * objects for each non-blocking receive they will post for
+ * incoming chunk modification messages. Then, all MPI ranks
+ * enter a loop that alternates between non-blocking
+ * MPI_Iprobe calls to probe for incoming messages and
+ * MPI_Testall calls to see if all send requests have
+ * completed. As chunk modification messages arrive,
+ * non-blocking MPI_Irecv calls will be posted for each
+ * message.
+ *
+ * Once all send requests have completed, an MPI_Ibarrier is
+ * posted and the loop then alternates between MPI_Iprobe
+ * calls and MPI_Test calls to check if all ranks have reached
+ * the non-blocking barrier. Once all ranks have reached the
+ * barrier, processing can move on to updating the selected
+ * chunks that are owned in the operation.
+ *
+ * Any chunk messages that were received from other ranks
+ * will be returned through the `chunk_msg_bufs` array and
+ * `chunk_msg_bufs_len` will be set appropriately.
+ *
+ * NOTE: The use of non-blocking sends and receives of chunk
+ * data here may contribute to large amounts of memory
+ * usage/MPI request overhead if the number of shared
+ * chunks is high. If this becomes a problem, it may be
+ * useful to split the message receiving loop away so
+ * that chunk modification messages can be received and
+ * processed immediately (MPI_Recv) using a single chunk
+ * message buffer. However, it's possible this may
+ * degrade performance since the chunk message sends
+ * are synchronous (MPI_Issend) in the Nonblocking
+ * Consensus algorithm.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t *chunk_list_num_entries, H5D_io_info_t *io_info,
+ const H5D_type_info_t *type_info, int mpi_rank, int mpi_size,
+ H5D_filtered_collective_io_info_t **chunk_hash_table,
+ unsigned char ***chunk_msg_bufs, int *chunk_msg_bufs_len)
+{
+#if H5_CHECK_MPI_VERSION(3, 0)
+ H5D_filtered_collective_io_info_t *chunk_table = NULL;
+ H5S_sel_iter_t * mem_iter = NULL;
+ unsigned char ** msg_send_bufs = NULL;
+ unsigned char ** msg_recv_bufs = NULL;
+ MPI_Request * send_requests = NULL;
+ MPI_Request * recv_requests = NULL;
+ MPI_Request ibarrier = MPI_REQUEST_NULL;
+ hbool_t mem_iter_init = FALSE;
+ hbool_t ibarrier_posted = FALSE;
+ size_t send_bufs_nalloc = 0;
+ size_t num_send_requests = 0;
+ size_t num_recv_requests = 0;
+ size_t num_msgs_incoming = 0;
+ size_t last_assigned_idx;
+ size_t i;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(chunk_list_num_entries);
+ HDassert(chunk_list || 0 == *chunk_list_num_entries);
+ HDassert(io_info);
+ HDassert(type_info);
+ HDassert(mpi_size > 1);
+ HDassert(chunk_msg_bufs);
+ HDassert(chunk_msg_bufs_len);
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Share chunk modification data");
+#endif
+
+ /* Set to latest format for encoding dataspace */
+ H5CX_set_libver_bounds(NULL);
+
+ if (*chunk_list_num_entries) {
+ /* Allocate a selection iterator for iterating over chunk dataspaces */
+ if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate dataspace selection iterator")
+
+ /*
+ * Allocate send buffer and MPI_Request arrays for non-blocking
+ * sends of outgoing chunk messages
+ */
+ send_bufs_nalloc = H5D_CHUNK_NUM_SEND_MSGS_INIT;
+ if (NULL == (msg_send_bufs = H5MM_malloc(send_bufs_nalloc * sizeof(*msg_send_bufs))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
+ "couldn't allocate chunk modification message buffer array")
+
+ if (NULL == (send_requests = H5MM_malloc(send_bufs_nalloc * sizeof(*send_requests))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate send requests array")
+ }
+
+ /*
+ * For each chunk this rank owns, add to the total number of
+ * incoming MPI messages, then update the local chunk list to
+ * overwrite any previous chunks no longer owned by this rank.
+ * Since the data for those chunks will have already been sent,
+ * this rank should no longer be interested in them and they
+ * can effectively be discarded. This bookkeeping also makes
+ * the code for the collective file space re-allocation and
+ * chunk re-insertion operations a bit simpler.
+ *
+ * For each chunk this rank doesn't own, use non-blocking
+ * synchronous sends to send the data this rank is writing to
+ * the rank that does own the chunk.
*/
- if (*local_chunk_array_num_entries)
- if (NULL == (mod_data = (unsigned char **)H5MM_malloc(*local_chunk_array_num_entries *
- sizeof(unsigned char *))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate modification data buffer array")
-
- /* Perform all the sends on the chunks that this rank doesn't own */
- /* (Sends and recvs must be two separate loops, to avoid deadlock) */
- for (i = 0, last_assigned_idx = 0; i < *local_chunk_array_num_entries; i++) {
- H5D_filtered_collective_io_info_t *chunk_entry = &local_chunk_array[i];
-
- if (mpi_rank != chunk_entry->owners.new_owner) {
- H5D_chunk_info_t *chunk_info = NULL;
+ for (i = 0, last_assigned_idx = 0; i < *chunk_list_num_entries; i++) {
+ H5D_filtered_collective_io_info_t *chunk_entry = &chunk_list[i];
+
+ if (mpi_rank == chunk_entry->new_owner) {
+ num_msgs_incoming += (size_t)(chunk_entry->num_writers - 1);
+
+ /*
+ * Overwrite chunk entries this rank doesn't own with entries that it
+ * does own, since it has sent the necessary data and is no longer
+ * interested in the chunks it doesn't own.
+ */
+ chunk_list[last_assigned_idx] = chunk_list[i];
+
+ /*
+ * Since, at large scale, a chunk's index value may be larger than
+ * the maximum value that can be stored in an int, we cannot rely
+ * on using a chunk's index value as the tag for the MPI messages
+ * sent/received for a chunk. Therefore, add this chunk to a hash
+ * table with the chunk's index as a key so that we can quickly find
+ * the chunk when processing chunk messages that were received. The
+ * message itself will contain the chunk's index so we can update
+ * the correct chunk with the received data.
+ */
+ HASH_ADD(hh, chunk_table, index_info.chunk_idx, sizeof(hsize_t), &chunk_list[last_assigned_idx]);
+
+ last_assigned_idx++;
+ }
+ else {
+ H5D_chunk_info_t *chunk_info = chunk_entry->chunk_info;
unsigned char * mod_data_p = NULL;
hsize_t iter_nelmts;
- size_t mod_data_size;
+ size_t mod_data_size = 0;
+ size_t space_size = 0;
- /* Look up the chunk and get its file and memory dataspaces */
- if (NULL == (chunk_info = (H5D_chunk_info_t *)H5SL_search(fm->sel_chunks, &chunk_entry->index)))
- HGOTO_ERROR(H5E_DATASPACE, H5E_NOTFOUND, FAIL, "can't locate chunk in skip list")
+ /* Add the size of the chunk index to the encoded size */
+ mod_data_size += sizeof(hsize_t);
- /* Determine size of serialized chunk file dataspace, plus the size of
- * the data being written
- */
- if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to get encoded dataspace size")
+ /* Determine size of serialized chunk file dataspace */
+ if (H5S_encode(chunk_info->fspace, &mod_data_p, &space_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get encoded dataspace size")
+ mod_data_size += space_size;
+ /* Determine size of data being written */
iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
-
H5_CHECK_OVERFLOW(iter_nelmts, hsize_t, size_t);
+
mod_data_size += (size_t)iter_nelmts * type_info->src_type_size;
- if (NULL == (mod_data[num_send_requests] = (unsigned char *)H5MM_malloc(mod_data_size)))
+ if (NULL == (msg_send_bufs[num_send_requests] = H5MM_malloc(mod_data_size)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "couldn't allocate chunk modification send buffer")
+ "couldn't allocate chunk modification message buffer")
+
+ mod_data_p = msg_send_bufs[num_send_requests];
+
+ /* Store the chunk's index into the buffer */
+ HDmemcpy(mod_data_p, &chunk_entry->index_info.chunk_idx, sizeof(hsize_t));
+ mod_data_p += sizeof(hsize_t);
/* Serialize the chunk's file dataspace into the buffer */
- mod_data_p = mod_data[num_send_requests];
if (H5S_encode(chunk_info->fspace, &mod_data_p, &mod_data_size) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTENCODE, FAIL, "unable to encode dataspace")
/* Initialize iterator for memory selection */
- if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size, 0) < 0)
+ if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size,
+ H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
"unable to initialize memory selection information")
mem_iter_init = TRUE;
@@ -2952,466 +3943,2057 @@ H5D__chunk_redistribute_shared_chunks(const H5D_io_info_t *io_info, const H5D_ty
if (0 == H5D__gather_mem(io_info->u.wbuf, mem_iter, (size_t)iter_nelmts, mod_data_p))
HGOTO_ERROR(H5E_IO, H5E_CANTGATHER, FAIL, "couldn't gather from write buffer")
- /* Send modification data to new owner */
+ /*
+ * Ensure that the size of the chunk data being sent can be
+ * safely cast to an int for MPI. Note that this should
+ * generally be OK for now (unless a rank is sending a
+ * whole 32-bit-sized chunk of data + its encoded selection),
+ * but if we allow larger than 32-bit-sized chunks in the
+ * future, this may become a problem and derived datatypes
+ * will need to be used.
+ */
H5_CHECK_OVERFLOW(mod_data_size, size_t, int)
- H5_CHECK_OVERFLOW(chunk_entry->index, hsize_t, int)
+
+ /* Send modification data to new owner */
if (MPI_SUCCESS !=
- (mpi_code = MPI_Isend(mod_data[num_send_requests], (int)mod_data_size, MPI_BYTE,
- chunk_entry->owners.new_owner, (int)chunk_entry->index, io_info->comm,
- &send_requests[num_send_requests])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Isend failed", mpi_code)
+ (mpi_code = MPI_Issend(msg_send_bufs[num_send_requests], (int)mod_data_size, MPI_BYTE,
+ chunk_entry->new_owner, H5D_CHUNK_MOD_DATA_TAG, io_info->comm,
+ &send_requests[num_send_requests])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Issend failed", mpi_code)
+
+ num_send_requests++;
+
+ /* Resize send buffer and send request arrays if necessary */
+ if (num_send_requests == send_bufs_nalloc) {
+ void *tmp_alloc;
+
+ send_bufs_nalloc = (size_t)((double)send_bufs_nalloc * 1.5);
+
+ if (NULL ==
+ (tmp_alloc = H5MM_realloc(msg_send_bufs, send_bufs_nalloc * sizeof(*msg_send_bufs))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
+ "couldn't resize chunk modification message buffer array")
+ msg_send_bufs = tmp_alloc;
+
+ if (NULL ==
+ (tmp_alloc = H5MM_realloc(send_requests, send_bufs_nalloc * sizeof(*send_requests))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't resize send requests array")
+ send_requests = tmp_alloc;
+ }
- if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+ if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release memory selection iterator")
mem_iter_init = FALSE;
+ }
+ }
- num_send_requests++;
- } /* end if */
- } /* end for */
+ /* Check if the number of send or receive requests will overflow an int (MPI requirement) */
+ if (num_send_requests > INT_MAX || num_msgs_incoming > INT_MAX)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
+ "too many shared chunks in parallel filtered write operation")
+
+ H5_CHECK_OVERFLOW(num_send_requests, size_t, int)
+ H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int)
+
+ /*
+ * Allocate receive buffer and MPI_Request arrays for non-blocking
+ * receives of incoming chunk messages
+ */
+ if (num_msgs_incoming) {
+ if (NULL == (msg_recv_bufs = H5MM_malloc(num_msgs_incoming * sizeof(*msg_recv_bufs))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
+ "couldn't allocate chunk modification message buffer array")
- /* Perform all the recvs on the chunks this rank owns */
- for (i = 0, last_assigned_idx = 0; i < *local_chunk_array_num_entries; i++) {
- H5D_filtered_collective_io_info_t *chunk_entry = &local_chunk_array[i];
+ if (NULL == (recv_requests = H5MM_malloc(num_msgs_incoming * sizeof(*recv_requests))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate receive requests array")
+ }
- if (mpi_rank == chunk_entry->owners.new_owner) {
- /* Allocate all necessary buffers for an asynchronous receive operation */
- if (chunk_entry->num_writers > 1) {
- MPI_Message message;
- MPI_Status status;
- size_t j;
+ /* Process any incoming messages until everyone is done */
+ do {
+ MPI_Status status;
+ int msg_flag;
- chunk_entry->async_info.num_receive_requests = (int)chunk_entry->num_writers - 1;
- if (NULL == (chunk_entry->async_info.receive_requests_array = (MPI_Request *)H5MM_malloc(
- (size_t)chunk_entry->async_info.num_receive_requests * sizeof(MPI_Request))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate async requests array")
+ /* Probe for an incoming message from any rank */
+ if (MPI_SUCCESS != (mpi_code = MPI_Iprobe(MPI_ANY_SOURCE, H5D_CHUNK_MOD_DATA_TAG, io_info->comm,
+ &msg_flag, &status)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Iprobe failed", mpi_code)
- if (NULL ==
- (chunk_entry->async_info.receive_buffer_array = (unsigned char **)H5MM_malloc(
- (size_t)chunk_entry->async_info.num_receive_requests * sizeof(unsigned char *))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate async receive buffers")
+ /*
+ * If a message was found, allocate a buffer for the message and
+ * post a non-blocking receive to receive it
+ */
+ if (msg_flag) {
+#if H5_CHECK_MPI_VERSION(3, 0)
+ MPI_Count msg_size = 0;
- for (j = 0; j < chunk_entry->num_writers - 1; j++) {
- int count = 0;
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&status, MPI_BYTE, &msg_size)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements_x failed", mpi_code)
- /* Probe for a particular message from any process, removing that message
- * from the receive queue in the process and allocating that much memory
- * for the asynchronous receive
- */
- if (MPI_SUCCESS != (mpi_code = MPI_Mprobe(MPI_ANY_SOURCE, (int)chunk_entry->index,
- io_info->comm, &message, &status)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Mprobe failed", mpi_code)
-
- if (MPI_SUCCESS != (mpi_code = MPI_Get_count(&status, MPI_BYTE, &count)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Get_count failed", mpi_code)
-
- HDassert(count >= 0);
- if (NULL == (chunk_entry->async_info.receive_buffer_array[j] =
- (unsigned char *)H5MM_malloc((size_t)count * sizeof(char *))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
- "unable to allocate modification data receive buffer")
-
- if (MPI_SUCCESS != (mpi_code = MPI_Imrecv(
- chunk_entry->async_info.receive_buffer_array[j], count, MPI_BYTE,
- &message, &chunk_entry->async_info.receive_requests_array[j])))
- HMPI_GOTO_ERROR(FAIL, "MPI_Imrecv failed", mpi_code)
- } /* end for */
- } /* end if */
-
- local_chunk_array[last_assigned_idx++] = local_chunk_array[i];
- } /* end else */
- } /* end for */
+ H5_CHECK_OVERFLOW(msg_size, MPI_Count, int)
+#else
+ int msg_size = 0;
- *local_chunk_array_num_entries = last_assigned_idx;
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&status, MPI_BYTE, &msg_size)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+#endif
- /* Wait for all async send requests to complete before returning */
- if (num_send_requests) {
- if (NULL == (send_statuses = (MPI_Status *)H5MM_malloc(num_send_requests * sizeof(MPI_Status))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate send statuses buffer")
+ if (msg_size <= 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid chunk modification message size")
- H5_CHECK_OVERFLOW(num_send_requests, size_t, int);
- if (MPI_SUCCESS != (mpi_code = MPI_Waitall((int)num_send_requests, send_requests, send_statuses)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Waitall failed", mpi_code)
- } /* end if */
+ HDassert((num_recv_requests + 1) <= num_msgs_incoming);
+ if (NULL ==
+ (msg_recv_bufs[num_recv_requests] = H5MM_malloc((size_t)msg_size * sizeof(unsigned char))))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL,
+ "couldn't allocate chunk modification message receive buffer")
-done:
- /* Now that all async send requests have completed, free up the send
- * buffers used in the async operations
+ if (MPI_SUCCESS != (mpi_code = MPI_Irecv(msg_recv_bufs[num_recv_requests], (int)msg_size,
+ MPI_BYTE, status.MPI_SOURCE, H5D_CHUNK_MOD_DATA_TAG,
+ io_info->comm, &recv_requests[num_recv_requests])))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Irecv failed", mpi_code)
+
+ num_recv_requests++;
+ }
+
+ if (ibarrier_posted) {
+ int ibarrier_completed;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Test(&ibarrier, &ibarrier_completed, MPI_STATUS_IGNORE)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Test failed", mpi_code)
+
+ if (ibarrier_completed)
+ break;
+ }
+ else {
+ int all_sends_completed;
+
+ /* Determine if all send requests have completed */
+ if (MPI_SUCCESS != (mpi_code = MPI_Testall((int)num_send_requests, send_requests,
+ &all_sends_completed, MPI_STATUSES_IGNORE)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Testall failed", mpi_code)
+
+ if (all_sends_completed) {
+ /* Post non-blocking barrier */
+ if (MPI_SUCCESS != (mpi_code = MPI_Ibarrier(io_info->comm, &ibarrier)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Ibarrier failed", mpi_code)
+ ibarrier_posted = TRUE;
+
+ /*
+ * Now that all send requests have completed, free up the
+ * send buffers used in the non-blocking operations
+ */
+ if (msg_send_bufs) {
+ for (i = 0; i < num_send_requests; i++) {
+ if (msg_send_bufs[i])
+ H5MM_free(msg_send_bufs[i]);
+ }
+
+ msg_send_bufs = H5MM_xfree(msg_send_bufs);
+ }
+ }
+ }
+ } while (1);
+
+ /*
+ * Ensure all receive requests have completed before moving on.
+ * For linked-chunk I/O, more overlap with computation could
+ * theoretically be achieved by returning the receive requests
+ * array and postponing this wait until during chunk updating
+ * when the data is really needed. However, multi-chunk I/O
+ * only updates a chunk at a time and the messages may not come
+ * in the order that chunks are processed. So, the safest way to
+ * support both I/O modes is to simply make sure all messages
+ * are available.
*/
- for (i = 0; i < num_send_requests; i++) {
- if (mod_data[i])
- H5MM_free(mod_data[i]);
- } /* end for */
+ if (MPI_SUCCESS != (mpi_code = MPI_Waitall((int)num_recv_requests, recv_requests, MPI_STATUSES_IGNORE)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Waitall failed", mpi_code)
+
+ /* Set the new number of locally-selected chunks */
+ *chunk_list_num_entries = last_assigned_idx;
+
+ /* Return chunk message buffers if any were received */
+ *chunk_hash_table = chunk_table;
+ *chunk_msg_bufs = msg_recv_bufs;
+ *chunk_msg_bufs_len = (int)num_recv_requests;
+done:
+ if (ret_value < 0) {
+ /* If this rank failed, make sure to participate in collective barrier */
+ if (!ibarrier_posted) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Ibarrier(io_info->comm, &ibarrier)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Ibarrier failed", mpi_code)
+ }
+
+ if (num_send_requests) {
+ for (i = 0; i < num_send_requests; i++) {
+ MPI_Cancel(&send_requests[i]);
+ }
+ }
+
+ if (recv_requests) {
+ for (i = 0; i < num_recv_requests; i++) {
+ MPI_Cancel(&recv_requests[i]);
+ }
+ }
+
+ if (msg_recv_bufs) {
+ for (i = 0; i < num_recv_requests; i++) {
+ H5MM_free(msg_recv_bufs[i]);
+ }
+
+ H5MM_free(msg_recv_bufs);
+ }
+
+ HASH_CLEAR(hh, chunk_table);
+ }
+
+ if (recv_requests)
+ H5MM_free(recv_requests);
if (send_requests)
H5MM_free(send_requests);
- if (send_statuses)
- H5MM_free(send_statuses);
- if (send_counts)
- H5MM_free(send_counts);
- if (send_displacements)
- H5MM_free(send_displacements);
- if (mod_data)
- H5MM_free(mod_data);
- if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
- HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
- if (mem_iter)
- H5MM_free(mem_iter);
- if (num_assigned_chunks_array)
- H5MM_free(num_assigned_chunks_array);
- if (shared_chunks_info_array)
- H5MM_free(shared_chunks_info_array);
+
+ if (msg_send_bufs) {
+ for (i = 0; i < num_send_requests; i++) {
+ if (msg_send_bufs[i])
+ H5MM_free(msg_send_bufs[i]);
+ }
+
+ H5MM_free(msg_send_bufs);
+ }
+
+ if (mem_iter) {
+ if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release dataspace selection iterator")
+ mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter);
+ }
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__chunk_redistribute_shared_chunks() */
+#else
+ FUNC_ENTER_STATIC
+ HERROR(
+ H5E_DATASET, H5E_WRITEERROR,
+ "unable to send chunk modification data between MPI ranks - MPI version < 3 (MPI_Ibarrier missing)")
+ FUNC_LEAVE_NOAPI(FAIL)
#endif
+} /* end H5D__mpio_share_chunk_modification_data() */
/*-------------------------------------------------------------------------
- * Function: H5D__mpio_filtered_collective_write_type
+ * Function: H5D__mpio_collective_filtered_chunk_common_io
*
- * Purpose: Constructs a MPI derived datatype for both the memory and
- * the file for a collective write of filtered chunks. The
- * datatype contains the offsets in the file and the locations
- * of the filtered chunk data buffers.
+ * Purpose: This routine performs the common part of collective I/O
+ * when reading or writing filtered chunks collectively.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Tuesday, November 22, 2016
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__mpio_filtered_collective_write_type(H5D_filtered_collective_io_info_t *chunk_list, size_t num_entries,
- MPI_Datatype *new_mem_type, hbool_t *mem_type_derived,
- MPI_Datatype *new_file_type, hbool_t *file_type_derived)
+H5D__mpio_collective_filtered_chunk_common_io(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries, const H5D_io_info_t *io_info,
+ const H5D_type_info_t *type_info, int mpi_size)
{
- MPI_Aint *write_buf_array = NULL; /* Relative displacements of filtered chunk data buffers */
- MPI_Aint *file_offset_array = NULL; /* Chunk offsets in the file */
- int * length_array = NULL; /* Filtered Chunk lengths */
- herr_t ret_value = SUCCEED;
+ H5D_io_info_t coll_io_info;
+ H5D_storage_t ctg_store;
+ MPI_Datatype file_type = MPI_DATATYPE_NULL;
+ MPI_Datatype mem_type = MPI_DATATYPE_NULL;
+ hbool_t mem_type_is_derived = FALSE;
+ hbool_t file_type_is_derived = FALSE;
+ hsize_t mpi_buf_count;
+ haddr_t base_read_offset = HADDR_UNDEF;
+ size_t num_chunks;
+ size_t i;
+ char fake_buf; /* Used as a fake buffer for ranks with no chunks, thus a NULL buf pointer */
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
- HDassert(chunk_list);
- HDassert(new_mem_type);
- HDassert(mem_type_derived);
- HDassert(new_file_type);
- HDassert(file_type_derived);
+ HDassert(chunk_list || 0 == chunk_list_num_entries);
+ HDassert(io_info);
+ HDassert(type_info);
- if (num_entries > 0) {
- size_t i;
- int mpi_code;
- void * base_buf;
-
- H5_CHECK_OVERFLOW(num_entries, size_t, int);
-
- /* Allocate arrays */
- if (NULL == (length_array = (int *)H5MM_malloc((size_t)num_entries * sizeof(int))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for filtered collective write length array")
- if (NULL == (write_buf_array = (MPI_Aint *)H5MM_malloc((size_t)num_entries * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for filtered collective write buf length array")
- if (NULL == (file_offset_array = (MPI_Aint *)H5MM_malloc((size_t)num_entries * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
- "memory allocation failed for collective write offset array")
-
- /* Ensure the list is sorted in ascending order of offset in the file */
- HDqsort(chunk_list, num_entries, sizeof(H5D_filtered_collective_io_info_t),
- H5D__cmp_filtered_collective_io_info_entry);
+ /* Initialize temporary I/O info */
+ coll_io_info = *io_info;
- base_buf = chunk_list[0].buf;
- for (i = 0; i < num_entries; i++) {
- /* Set up the offset in the file, the length of the chunk data, and the relative
- * displacement of the chunk data write buffer
- */
- file_offset_array[i] = (MPI_Aint)chunk_list[i].chunk_states.new_chunk.offset;
- length_array[i] = (int)chunk_list[i].chunk_states.new_chunk.length;
- write_buf_array[i] = (MPI_Aint)chunk_list[i].buf - (MPI_Aint)base_buf;
- } /* end for */
+ /*
+ * Construct MPI derived datatype for collective I/O on chunks
+ */
+ if (H5D__mpio_collective_filtered_io_type(chunk_list, chunk_list_num_entries, io_info->op_type, &mem_type,
+ &mem_type_is_derived, &file_type, &file_type_is_derived) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "couldn't create MPI I/O type for chunk I/O")
- /* Create memory MPI type */
- if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_entries, length_array,
- write_buf_array, MPI_BYTE, new_mem_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
- *mem_type_derived = TRUE;
- if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_mem_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
-
- /* Create file MPI type */
- if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_entries, length_array,
- file_offset_array, MPI_BYTE, new_file_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
- *file_type_derived = TRUE;
- if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_file_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
- } /* end if */
+ /*
+ * For reads, determine how many chunks are actually being read.
+ * Note that if this is a read during a write operation
+ * (read chunk -> unfilter -> modify -> write back), some
+ * chunks may not need to be read if they're being fully
+ * overwritten during a write operation.
+ */
+ if (io_info->op_type == H5D_IO_OP_READ) {
+ for (i = 0, num_chunks = 0; i < chunk_list_num_entries; i++) {
+ HDassert(chunk_list[i].buf);
+
+ if (chunk_list[i].need_read) {
+ if (!H5F_addr_defined(base_read_offset))
+ base_read_offset = chunk_list[i].chunk_current.offset;
+
+ num_chunks++;
+ }
+ }
+ }
+ else
+ num_chunks = chunk_list_num_entries;
+
+ /*
+ * If this rank doesn't have a selection, it can
+ * skip I/O if independent I/O was requested at
+ * the low level, or if the MPI communicator size
+ * is 1.
+ *
+ * Otherwise, this rank has to participate in
+ * collective I/O, but probably has a NULL buf
+ * pointer, so override to a fake buffer since our
+ * write/read function expects one.
+ */
+ if (num_chunks == 0) {
+ H5FD_mpio_collective_opt_t coll_opt_mode;
+
+ /* Get the collective_opt property to check whether the application wants to do IO individually. */
+ if (H5CX_get_mpio_coll_opt(&coll_opt_mode) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_opt property")
+
+ if ((mpi_size == 1) || (H5FD_MPIO_INDIVIDUAL_IO == coll_opt_mode)) {
+ HGOTO_DONE(SUCCEED)
+ }
+ else {
+ if (io_info->op_type == H5D_IO_OP_WRITE)
+ coll_io_info.u.wbuf = &fake_buf;
+ else
+ coll_io_info.u.rbuf = &fake_buf;
+ }
+ }
+
+ /*
+ * Setup for I/O operation
+ */
+
+ mpi_buf_count = (num_chunks) ? 1 : 0;
+
+ if (num_chunks) {
+ /*
+ * Setup the base storage address for this operation
+ * to be the first chunk's file address
+ */
+ if (io_info->op_type == H5D_IO_OP_WRITE)
+ ctg_store.contig.dset_addr = chunk_list[0].chunk_new.offset;
+ else
+ ctg_store.contig.dset_addr = base_read_offset;
+ }
+ else
+ ctg_store.contig.dset_addr = 0;
+
+ ctg_store.contig.dset_size = (hsize_t)io_info->dset->shared->layout.u.chunk.size;
+ coll_io_info.store = &ctg_store;
+
+ /* Perform I/O */
+ if (H5D__final_collective_io(&coll_io_info, type_info, mpi_buf_count, file_type, mem_type) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish MPI I/O")
done:
- if (write_buf_array)
- H5MM_free(write_buf_array);
- if (file_offset_array)
- H5MM_free(file_offset_array);
- if (length_array)
- H5MM_free(length_array);
+ /* Free the MPI buf and file types, if they were derived */
+ if (mem_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ if (file_type_is_derived && MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__mpio_filtered_collective_write_type() */
+} /* end H5D__mpio_collective_filtered_chunk_common_io() */
/*-------------------------------------------------------------------------
- * Function: H5D__filtered_collective_chunk_entry_io
+ * Function: H5D__mpio_collective_filtered_chunk_read
*
- * Purpose: Given an entry for a filtered chunk, performs the necessary
- * steps for updating the chunk data during a collective
- * write, or for reading the chunk from file during a
- * collective read.
+ * Purpose:     This routine coordinates a collective read across all ranks
+ *              of the chunks they have selected. Each rank will then
+ *              process its read chunks (e.g., unfiltering them) as needed.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Jordan Henderson
- * Wednesday, January 18, 2017
- *
*-------------------------------------------------------------------------
*/
static herr_t
-H5D__filtered_collective_chunk_entry_io(H5D_filtered_collective_io_info_t *chunk_entry,
- const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
- const H5D_chunk_map_t *fm)
+H5D__mpio_collective_filtered_chunk_read(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries, const H5D_io_info_t *io_info,
+ const H5D_type_info_t *type_info, int mpi_rank, int mpi_size)
{
- H5D_chunk_info_t *chunk_info = NULL;
- H5S_sel_iter_t * mem_iter = NULL; /* Memory iterator for H5D__scatter_mem/H5D__gather_mem */
- H5S_sel_iter_t * file_iter = NULL;
- H5Z_EDC_t err_detect; /* Error detection info */
- H5Z_cb_t filter_cb; /* I/O filter callback function */
- unsigned filter_mask = 0;
- hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */
- hssize_t extent_npoints;
- hsize_t true_chunk_size;
- hbool_t mem_iter_init = FALSE;
- hbool_t file_iter_init = FALSE;
- size_t buf_size;
- size_t i;
- H5S_t * dataspace = NULL; /* Other process' dataspace for the chunk */
- void * tmp_gath_buf = NULL; /* Temporary gather buffer to gather into from application buffer
- before scattering out to the chunk data buffer (when writing data),
- or vice versa (when reading data) */
- int mpi_code;
- herr_t ret_value = SUCCEED;
+ H5D_fill_buf_info_t fb_info;
+ H5D_chunk_info_t * chunk_info = NULL;
+ H5D_io_info_t coll_io_info;
+ H5Z_EDC_t err_detect; /* Error detection info */
+ H5Z_cb_t filter_cb; /* I/O filter callback function */
+ hsize_t file_chunk_size = 0;
+ hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */
+ hbool_t should_fill = FALSE;
+ hbool_t fb_info_init = FALSE;
+ hbool_t index_empty = FALSE;
+ size_t i;
+ H5S_t * fill_space = NULL;
+ void * base_read_buf = NULL;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
- HDassert(chunk_entry);
+ HDassert(chunk_list || 0 == chunk_list_num_entries);
HDassert(io_info);
HDassert(type_info);
- HDassert(fm);
- /* Retrieve filter settings from API context */
- if (H5CX_get_err_detect(&err_detect) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
- if (H5CX_get_filter_cb(&filter_cb) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Filtered collective chunk read");
+#else
+ (void)mpi_rank;
+#endif
+
+ /* Initialize temporary I/O info */
+ coll_io_info = *io_info;
+ coll_io_info.u.rbuf = NULL;
- /* Look up the chunk and get its file and memory dataspaces */
- if (NULL == (chunk_info = (H5D_chunk_info_t *)H5SL_search(fm->sel_chunks, &chunk_entry->index)))
- HGOTO_ERROR(H5E_DATASPACE, H5E_NOTFOUND, FAIL, "can't locate chunk in skip list")
+ if (chunk_list_num_entries) {
+ /* Retrieve filter settings from API context */
+ if (H5CX_get_err_detect(&err_detect) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
+ if (H5CX_get_filter_cb(&filter_cb) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
- if ((extent_npoints = H5S_GET_EXTENT_NPOINTS(chunk_info->fspace)) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
- true_chunk_size = (hsize_t)extent_npoints * type_info->src_type_size;
+ /* Set size of full chunks in dataset */
+ file_chunk_size = io_info->dset->shared->layout.u.chunk.size;
+
+ /* Determine if fill values should be "read" for unallocated chunks */
+ should_fill = (io_info->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC) ||
+ ((io_info->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET) &&
+ io_info->dset->shared->dcpl_cache.fill.fill_defined);
+ }
- /* If the size of the filtered chunk is larger than the number of points in the
- * chunk file space extent times the datatype size, allocate enough space to hold the
- * whole filtered chunk. Otherwise, allocate a buffer equal to the size of the
- * chunk so that the unfiltering operation doesn't have to grow the buffer.
+ /*
+ * Allocate memory buffers for all chunks being read. Chunk data buffers are of
+ * the largest size between the chunk's current filtered size and the chunk's true
+ * size, as calculated by the number of elements in the chunk's file space extent
+ * multiplied by the datatype size. This tries to ensure that:
+ *
+ * * If we're reading the chunk and the filter normally reduces the chunk size,
+ * the unfiltering operation won't need to grow the buffer.
+ * * If we're reading the chunk and the filter normally grows the chunk size,
+ * we make sure to read into a buffer of size equal to the filtered chunk's
+ * size; reading into a (smaller) buffer of size equal to the unfiltered
+ * chunk size would of course be bad.
*/
- buf_size = MAX(chunk_entry->chunk_states.chunk_current.length, true_chunk_size);
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ HDassert(chunk_list[i].need_read);
+
+ chunk_list[i].chunk_buf_size = MAX(chunk_list[i].chunk_current.length, file_chunk_size);
+
+ if (NULL == (chunk_list[i].buf = H5MM_malloc(chunk_list[i].chunk_buf_size))) {
+ /* Push an error, but participate in collective read */
+ HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer")
+ break;
+ }
+
+ /*
+ * Check if chunk is currently allocated. If not, don't try to
+ * read it from the file. Instead, just fill the chunk buffer
+ * with the fill value if necessary.
+ */
+ if (H5F_addr_defined(chunk_list[i].chunk_current.offset)) {
+ /* Set first read buffer */
+ if (!base_read_buf)
+ base_read_buf = chunk_list[i].buf;
+
+ /* Set chunk's new length for eventual filter pipeline calls */
+ if (chunk_list[i].skip_filter_pline)
+ chunk_list[i].chunk_new.length = file_chunk_size;
+ else
+ chunk_list[i].chunk_new.length = chunk_list[i].chunk_current.length;
+ }
+ else {
+ chunk_list[i].need_read = FALSE;
+
+ /* Set chunk's new length for eventual filter pipeline calls */
+ chunk_list[i].chunk_new.length = file_chunk_size;
+
+ if (should_fill) {
+ /* Initialize fill value buffer if not already initialized */
+ if (!fb_info_init) {
+ hsize_t chunk_dims[H5S_MAX_RANK];
+
+ HDassert(io_info->dset->shared->ndims == io_info->dset->shared->layout.u.chunk.ndims - 1);
+ for (size_t j = 0; j < io_info->dset->shared->layout.u.chunk.ndims - 1; j++)
+ chunk_dims[j] = (hsize_t)io_info->dset->shared->layout.u.chunk.dim[j];
+
+ /* Get a dataspace for filling chunk memory buffers */
+ if (NULL == (fill_space = H5S_create_simple(
+ io_info->dset->shared->layout.u.chunk.ndims - 1, chunk_dims, NULL)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create chunk fill dataspace")
+
+ /* Initialize fill value buffer */
+ if (H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc,
+ (void *)&io_info->dset->shared->dcpl_cache.pline,
+ (H5MM_free_t)H5D__chunk_mem_free,
+ (void *)&io_info->dset->shared->dcpl_cache.pline,
+ &io_info->dset->shared->dcpl_cache.fill, io_info->dset->shared->type,
+ io_info->dset->shared->type_id, 0, file_chunk_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer")
+
+ fb_info_init = TRUE;
+ }
- if (NULL == (chunk_entry->buf = H5MM_malloc(buf_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer")
+ /* Write fill value to memory buffer */
+ HDassert(fb_info.fill_buf);
+ if (H5D__fill(fb_info.fill_buf, io_info->dset->shared->type, chunk_list[i].buf,
+ type_info->mem_type, fill_space) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "couldn't fill chunk buffer with fill value")
+ }
+ }
+ }
- /* If this is not a full chunk overwrite or this is a read operation, the chunk must be
- * read from the file and unfiltered.
+ /*
+ * If dataset is incrementally allocated and hasn't been written to
+ * yet, the chunk index should be empty. In this case, a collective
+ * read of chunks is essentially a no-op, so avoid it here.
*/
- if (!chunk_entry->full_overwrite || io_info->op_type == H5D_IO_OP_READ) {
- H5FD_mpio_xfer_t xfer_mode; /* Parallel transfer for this request */
+ index_empty = FALSE;
+ if (io_info->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR)
+ if (H5D__chunk_index_empty(io_info->dset, &index_empty) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty")
- chunk_entry->chunk_states.new_chunk.length = chunk_entry->chunk_states.chunk_current.length;
+ if (!index_empty) {
+ /*
+ * Override the read buffer to point to the address of
+ * the first chunk data buffer being read into
+ */
+ if (base_read_buf)
+ coll_io_info.u.rbuf = base_read_buf;
- /* Currently, these chunk reads are done independently and will likely
- * cause issues with collective metadata reads enabled. In the future,
- * this should be refactored to use collective chunk reads - JTH */
+ /* Perform collective chunk read */
+ if (H5D__mpio_collective_filtered_chunk_common_io(chunk_list, chunk_list_num_entries, &coll_io_info,
+ type_info, mpi_size) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish collective filtered chunk read")
+ }
- /* Get the original state of parallel I/O transfer mode */
- if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
+ /*
+ * Iterate through all the read chunks, unfiltering them and scattering their
+ * data out to the application's read buffer.
+ */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ chunk_info = chunk_list[i].chunk_info;
+
+ /* Unfilter the chunk, unless we didn't read it from the file */
+ if (chunk_list[i].need_read && !chunk_list[i].skip_filter_pline) {
+ if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE,
+ &(chunk_list[i].index_info.filter_mask), err_detect, filter_cb,
+ (size_t *)&chunk_list[i].chunk_new.length, &chunk_list[i].chunk_buf_size,
+ &chunk_list[i].buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
+ }
+
+ /* Scatter the chunk data to the read buffer */
+ iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace);
+
+ if (H5D_select_io_mem(io_info->u.rbuf, chunk_info->mspace, chunk_list[i].buf, chunk_info->fspace,
+ type_info->src_type_size, (size_t)iter_nelmts) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't copy chunk data to read buffer")
+ }
- /* Change the xfer_mode to independent for handling the I/O */
- if (H5CX_set_io_xfer_mode(H5FD_MPIO_INDEPENDENT) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode")
+done:
+ /* Free all resources used by entries in the chunk list */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ if (chunk_list[i].buf) {
+ H5MM_free(chunk_list[i].buf);
+ chunk_list[i].buf = NULL;
+ }
+ }
- if (H5F_shared_block_read(io_info->f_sh, H5FD_MEM_DRAW,
- chunk_entry->chunk_states.chunk_current.offset,
- chunk_entry->chunk_states.new_chunk.length, chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data chunk")
+ /* Release the fill buffer info, if it's been initialized */
+ if (fb_info_init && H5D__fill_term(&fb_info) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+ if (fill_space && (H5S_close(fill_space) < 0))
+ HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space")
- /* Return to the original I/O transfer mode setting */
- if (H5CX_set_io_xfer_mode(xfer_mode) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set MPI-I/O transfer mode")
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
- if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE, &filter_mask, err_detect,
- filter_cb, (size_t *)&chunk_entry->chunk_states.new_chunk.length, &buf_size,
- &chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
- } /* end if */
- else {
- chunk_entry->chunk_states.new_chunk.length = true_chunk_size;
- } /* end else */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_collective_filtered_chunk_read() */
- /* Initialize iterator for memory selection */
- if (NULL == (mem_iter = (H5S_sel_iter_t *)H5MM_malloc(sizeof(H5S_sel_iter_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator")
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_collective_filtered_chunk_update
+ *
+ * Purpose: When performing a parallel write on a chunked dataset with
+ * filters applied, all ranks must update their owned chunks
+ * with their own modification data and data from other ranks.
+ * This routine is responsible for coordinating that process.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_collective_filtered_chunk_update(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries,
+ H5D_filtered_collective_io_info_t *chunk_hash_table,
+ unsigned char **chunk_msg_bufs, int chunk_msg_bufs_len,
+ const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
+ int mpi_rank, int mpi_size)
+{
+ H5D_fill_buf_info_t fb_info;
+ H5D_chunk_info_t * chunk_info = NULL;
+ H5S_sel_iter_t * sel_iter = NULL; /* Dataspace selection iterator for H5D__scatter_mem */
+ H5D_io_info_t coll_io_info;
+ H5Z_EDC_t err_detect; /* Error detection info */
+ H5Z_cb_t filter_cb; /* I/O filter callback function */
+ hsize_t file_chunk_size = 0;
+ hsize_t iter_nelmts; /* Number of points to iterate over for the chunk IO operation */
+ hbool_t should_fill = FALSE;
+ hbool_t fb_info_init = FALSE;
+ hbool_t sel_iter_init = FALSE;
+ hbool_t index_empty = FALSE;
+ size_t i;
+ H5S_t * dataspace = NULL;
+ H5S_t * fill_space = NULL;
+ void * base_read_buf = NULL;
+ herr_t ret_value = SUCCEED;
- if (H5S_select_iter_init(mem_iter, chunk_info->mspace, type_info->src_type_size, 0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize memory selection information")
- mem_iter_init = TRUE;
+ FUNC_ENTER_STATIC
- /* If this is a read operation, scatter the read chunk data to the user's buffer.
- *
- * If this is a write operation, update the chunk data buffer with the modifications
- * from the current process, then apply any modifications from other processes. Finally,
- * filter the newly-updated chunk.
- */
- switch (io_info->op_type) {
- case H5D_IO_OP_READ:
- if (NULL == (file_iter = (H5S_sel_iter_t *)H5MM_malloc(sizeof(H5S_sel_iter_t))))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate file iterator")
+ HDassert(chunk_list || 0 == chunk_list_num_entries);
+ HDassert((chunk_msg_bufs && chunk_hash_table) || 0 == chunk_msg_bufs_len);
+ HDassert(io_info);
+ HDassert(type_info);
- if (H5S_select_iter_init(file_iter, chunk_info->fspace, type_info->src_type_size, 0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to initialize memory selection information")
- file_iter_init = TRUE;
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TRACE_ENTER(mpi_rank);
+ H5D_MPIO_TIME_START(mpi_rank, "Filtered collective chunk update");
+#endif
+
+ if (chunk_list_num_entries) {
+ /* Retrieve filter settings from API context */
+ if (H5CX_get_err_detect(&err_detect) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get error detection info")
+ if (H5CX_get_filter_cb(&filter_cb) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get I/O filter callback function")
- iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace);
+ /* Set size of full chunks in dataset */
+ file_chunk_size = io_info->dset->shared->layout.u.chunk.size;
- if (NULL == (tmp_gath_buf = H5MM_malloc(iter_nelmts * type_info->src_type_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
+ /* Determine if fill values should be written to chunks */
+ should_fill = (io_info->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_ALLOC) ||
+ ((io_info->dset->shared->dcpl_cache.fill.fill_time == H5D_FILL_TIME_IFSET) &&
+ io_info->dset->shared->dcpl_cache.fill.fill_defined);
+ }
- if (!H5D__gather_mem(chunk_entry->buf, file_iter, (size_t)iter_nelmts, tmp_gath_buf))
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't gather from chunk buffer")
+ /*
+ * Allocate memory buffers for all owned chunks. Chunk data buffers are of the
+ * largest size between the chunk's current filtered size and the chunk's true
+ * size, as calculated by the number of elements in the chunk's file space extent
+ * multiplied by the datatype size. This tries to ensure that:
+ *
+ * * If we're fully overwriting the chunk and the filter normally reduces the
+ * chunk size, we simply have the exact buffer size required to hold the
+ * unfiltered chunk data.
+ * * If we're fully overwriting the chunk and the filter normally grows the
+ * chunk size (e.g., fletcher32 filter), the final filtering operation
+ * (hopefully) won't need to grow the buffer.
+ * * If we're reading the chunk and the filter normally reduces the chunk size,
+ * the unfiltering operation won't need to grow the buffer.
+ * * If we're reading the chunk and the filter normally grows the chunk size,
+ * we make sure to read into a buffer of size equal to the filtered chunk's
+ * size; reading into a (smaller) buffer of size equal to the unfiltered
+ * chunk size would of course be bad.
+ */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ HDassert(mpi_rank == chunk_list[i].new_owner);
- iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
+ chunk_list[i].chunk_buf_size = MAX(chunk_list[i].chunk_current.length, file_chunk_size);
- if (H5D__scatter_mem(tmp_gath_buf, mem_iter, (size_t)iter_nelmts, io_info->u.rbuf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to read buffer")
+ /*
+ * If this chunk hasn't been allocated yet and we aren't writing
+ * out fill values to it, make sure to 0-fill its memory buffer
+ * so we don't use uninitialized memory.
+ */
+ if (!H5F_addr_defined(chunk_list[i].chunk_current.offset) && !should_fill)
+ chunk_list[i].buf = H5MM_calloc(chunk_list[i].chunk_buf_size);
+ else
+ chunk_list[i].buf = H5MM_malloc(chunk_list[i].chunk_buf_size);
+ if (NULL == chunk_list[i].buf) {
+ /* Push an error, but participate in collective read */
+ HDONE_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk data buffer")
break;
+ }
+
+ /* Set chunk's new length for eventual filter pipeline calls */
+ if (chunk_list[i].need_read) {
+ /*
+ * Check if chunk is currently allocated. If not, don't try to
+ * read it from the file. Instead, just fill the chunk buffer
+ * with the fill value if fill values are to be written.
+ */
+ if (H5F_addr_defined(chunk_list[i].chunk_current.offset)) {
+ /* Set first read buffer */
+ if (!base_read_buf)
+ base_read_buf = chunk_list[i].buf;
+
+ /* Set chunk's new length for eventual filter pipeline calls */
+ if (chunk_list[i].skip_filter_pline)
+ chunk_list[i].chunk_new.length = file_chunk_size;
+ else
+ chunk_list[i].chunk_new.length = chunk_list[i].chunk_current.length;
+ }
+ else {
+ chunk_list[i].need_read = FALSE;
+
+ /* Set chunk's new length for eventual filter pipeline calls */
+ chunk_list[i].chunk_new.length = file_chunk_size;
+
+ if (should_fill) {
+ /* Initialize fill value buffer if not already initialized */
+ if (!fb_info_init) {
+ hsize_t chunk_dims[H5S_MAX_RANK];
+
+ HDassert(io_info->dset->shared->ndims ==
+ io_info->dset->shared->layout.u.chunk.ndims - 1);
+ for (size_t j = 0; j < io_info->dset->shared->layout.u.chunk.ndims - 1; j++)
+ chunk_dims[j] = (hsize_t)io_info->dset->shared->layout.u.chunk.dim[j];
+
+ /* Get a dataspace for filling chunk memory buffers */
+ if (NULL == (fill_space = H5S_create_simple(
+ io_info->dset->shared->layout.u.chunk.ndims - 1, chunk_dims, NULL)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "unable to create chunk fill dataspace")
+
+ /* Initialize fill value buffer */
+ if (H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc,
+ (void *)&io_info->dset->shared->dcpl_cache.pline,
+ (H5MM_free_t)H5D__chunk_mem_free,
+ (void *)&io_info->dset->shared->dcpl_cache.pline,
+ &io_info->dset->shared->dcpl_cache.fill,
+ io_info->dset->shared->type, io_info->dset->shared->type_id, 0,
+ file_chunk_size) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill value buffer")
+
+ fb_info_init = TRUE;
+ }
+
+ /* Write fill value to memory buffer */
+ HDassert(fb_info.fill_buf);
+ if (H5D__fill(fb_info.fill_buf, io_info->dset->shared->type, chunk_list[i].buf,
+ type_info->mem_type, fill_space) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
+ "couldn't fill chunk buffer with fill value")
+ }
+ }
+ }
+ else
+ chunk_list[i].chunk_new.length = file_chunk_size;
+ }
- case H5D_IO_OP_WRITE:
- iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
+ /*
+ * If dataset is incrementally allocated and hasn't been written to
+ * yet, the chunk index should be empty. In this case, a collective
+ * read of chunks is essentially a no-op, so avoid it here.
+ */
+ index_empty = FALSE;
+ if (io_info->dset->shared->dcpl_cache.fill.alloc_time == H5D_ALLOC_TIME_INCR)
+ if (H5D__chunk_index_empty(io_info->dset, &index_empty) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "couldn't determine if chunk index is empty")
- if (NULL == (tmp_gath_buf = H5MM_malloc(iter_nelmts * type_info->src_type_size)))
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate temporary gather buffer")
+ if (!index_empty) {
+ /*
+ * Setup for I/O operation
+ */
- /* Gather modification data from the application write buffer into a temporary buffer */
- if (0 == H5D__gather_mem(io_info->u.wbuf, mem_iter, (size_t)iter_nelmts, tmp_gath_buf))
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "couldn't gather from write buffer")
+ /* Initialize temporary I/O info */
+ coll_io_info = *io_info;
+ coll_io_info.op_type = H5D_IO_OP_READ;
- if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
- mem_iter_init = FALSE;
+ /* Override the read buffer to point to the address of the first
+ * chunk data buffer being read into
+ */
+ if (base_read_buf)
+ coll_io_info.u.rbuf = base_read_buf;
- /* Initialize iterator for file selection */
- if (H5S_select_iter_init(mem_iter, chunk_info->fspace, type_info->dst_type_size, 0) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
- "unable to initialize file selection information")
- mem_iter_init = TRUE;
+ /* Read all chunks that need to be read from the file */
+ if (H5D__mpio_collective_filtered_chunk_common_io(chunk_list, chunk_list_num_entries, &coll_io_info,
+ type_info, mpi_size) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "couldn't finish collective filtered chunk read")
+ }
- iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->fspace);
+ /*
+ * Now that all owned chunks have been read, update the chunks
+ * with modification data from the owning rank and other ranks.
+ */
- /* Scatter the owner's modification data into the chunk data buffer according to
- * the file space.
- */
- if (H5D__scatter_mem(tmp_gath_buf, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "couldn't scatter to chunk data buffer")
+ /* Process all chunks with data from the owning rank first */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ HDassert(mpi_rank == chunk_list[i].new_owner);
- if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
- mem_iter_init = FALSE;
+ chunk_info = chunk_list[i].chunk_info;
- if (MPI_SUCCESS !=
- (mpi_code = MPI_Waitall(chunk_entry->async_info.num_receive_requests,
- chunk_entry->async_info.receive_requests_array, MPI_STATUSES_IGNORE)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Waitall failed", mpi_code)
+ /*
+ * If this chunk wasn't being fully overwritten, we read it from
+ * the file, so we need to unfilter it
+ */
+ if (chunk_list[i].need_read && !chunk_list[i].skip_filter_pline) {
+ if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, H5Z_FLAG_REVERSE,
+ &(chunk_list[i].index_info.filter_mask), err_detect, filter_cb,
+ (size_t *)&chunk_list[i].chunk_new.length, &chunk_list[i].chunk_buf_size,
+ &chunk_list[i].buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTFILTER, FAIL, "couldn't unfilter chunk for modifying")
+ }
+
+ iter_nelmts = H5S_GET_SELECT_NPOINTS(chunk_info->mspace);
+
+ if (H5D_select_io_mem(chunk_list[i].buf, chunk_info->fspace, io_info->u.wbuf, chunk_info->mspace,
+ type_info->dst_type_size, (size_t)iter_nelmts) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't copy chunk data to write buffer")
+ }
- /* For each asynchronous receive call previously posted, receive the chunk modification
- * buffer from another rank and update the chunk data
- */
- for (i = 0; i < (size_t)chunk_entry->async_info.num_receive_requests; i++) {
- const unsigned char *mod_data_p;
+ /* Allocate iterator for memory selection */
+ if (NULL == (sel_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate memory iterator")
- /* Decode the process' chunk file dataspace */
- mod_data_p = chunk_entry->async_info.receive_buffer_array[i];
- if (NULL == (dataspace = H5S_decode(&mod_data_p)))
+ /* Now process all received chunk message buffers */
+ for (i = 0; i < (size_t)chunk_msg_bufs_len; i++) {
+ H5D_filtered_collective_io_info_t *chunk_entry = NULL;
+ const unsigned char * msg_ptr = chunk_msg_bufs[i];
+ hsize_t chunk_idx;
+
+ if (msg_ptr) {
+ /* Retrieve the chunk's index value */
+ HDmemcpy(&chunk_idx, msg_ptr, sizeof(hsize_t));
+ msg_ptr += sizeof(hsize_t);
+
+ /* Find the chunk entry according to its chunk index */
+ HASH_FIND(hh, chunk_hash_table, &chunk_idx, sizeof(hsize_t), chunk_entry);
+ HDassert(chunk_entry);
+ HDassert(mpi_rank == chunk_entry->new_owner);
+
+ /*
+ * Only process the chunk if its data buffer is allocated.
+ * In the case of multi-chunk I/O, we're only working on
+ * a chunk at a time, so we need to skip over messages
+ * that aren't for the chunk we're currently working on.
+ */
+ if (!chunk_entry->buf)
+ continue;
+ else {
+ /* Decode the chunk file dataspace from the message */
+ if (NULL == (dataspace = H5S_decode(&msg_ptr)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTDECODE, FAIL, "unable to decode dataspace")
- if (H5S_select_iter_init(mem_iter, dataspace, type_info->dst_type_size, 0) < 0)
+ if (H5S_select_iter_init(sel_iter, dataspace, type_info->dst_type_size,
+ H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL,
"unable to initialize memory selection information")
- mem_iter_init = TRUE;
+ sel_iter_init = TRUE;
iter_nelmts = H5S_GET_SELECT_NPOINTS(dataspace);
/* Update the chunk data with the received modification data */
- if (H5D__scatter_mem(mod_data_p, mem_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
+ if (H5D__scatter_mem(msg_ptr, sel_iter, (size_t)iter_nelmts, chunk_entry->buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "couldn't scatter to write buffer")
- if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+ if (H5S_SELECT_ITER_RELEASE(sel_iter) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
- mem_iter_init = FALSE;
+ sel_iter_init = FALSE;
+
if (dataspace) {
if (H5S_close(dataspace) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace")
dataspace = NULL;
}
- H5MM_free(chunk_entry->async_info.receive_buffer_array[i]);
- } /* end for */
- /* Filter the chunk */
- if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, 0, &filter_mask, err_detect, filter_cb,
- (size_t *)&chunk_entry->chunk_states.new_chunk.length, &buf_size,
- &chunk_entry->buf) < 0)
+ H5MM_free(chunk_msg_bufs[i]);
+ chunk_msg_bufs[i] = NULL;
+ }
+ }
+ }
+
+ /* Finally, filter all the chunks */
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ if (!chunk_list[i].skip_filter_pline) {
+ if (H5Z_pipeline(&io_info->dset->shared->dcpl_cache.pline, 0,
+ &(chunk_list[i].index_info.filter_mask), err_detect, filter_cb,
+ (size_t *)&chunk_list[i].chunk_new.length, &chunk_list[i].chunk_buf_size,
+ &chunk_list[i].buf) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
+ }
#if H5_SIZEOF_SIZE_T > 4
- /* Check for the chunk expanding too much to encode in a 32-bit value */
- if (chunk_entry->chunk_states.new_chunk.length > ((size_t)0xffffffff))
- HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
+ /* Check for the chunk expanding too much to encode in a 32-bit value */
+ if (chunk_list[i].chunk_new.length > ((size_t)0xffffffff))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
#endif
- break;
+ }
- default:
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "invalid I/O operation")
- } /* end switch */
+done:
+ if (sel_iter) {
+ if (sel_iter_init && H5S_SELECT_ITER_RELEASE(sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
+ sel_iter = H5FL_FREE(H5S_sel_iter_t, sel_iter);
+ }
+ if (dataspace && (H5S_close(dataspace) < 0))
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace")
+ if (fill_space && (H5S_close(fill_space) < 0))
+ HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "can't close fill space")
+
+ /* Release the fill buffer info, if it's been initialized */
+ if (fb_info_init && H5D__fill_term(&fb_info) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
+
+ /* On failure, try to free all resources used by entries in the chunk list */
+ if (ret_value < 0) {
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ if (chunk_list[i].buf) {
+ H5MM_free(chunk_list[i].buf);
+ chunk_list[i].buf = NULL;
+ }
+ }
+ }
+
+#ifdef H5Dmpio_DEBUG
+ H5D_MPIO_TIME_STOP(mpi_rank);
+ H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_collective_filtered_chunk_update() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_collective_filtered_chunk_reallocate
+ *
+ * Purpose: When performing a parallel write on a chunked dataset with
+ * filters applied, all ranks must eventually get together and
+ * perform a collective reallocation of space in the file for
+ * all chunks that were modified on all ranks. This routine is
+ * responsible for coordinating that process.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_collective_filtered_chunk_reallocate(H5D_filtered_collective_io_info_t *chunk_list,
+                                               size_t chunk_list_num_entries, size_t *num_chunks_assigned_map,
+                                               H5D_io_info_t *io_info, H5D_chk_idx_info_t *idx_info,
+                                               int mpi_rank, int mpi_size)
+{
+    H5D_chunk_alloc_info_t *collective_list = NULL;
+    MPI_Datatype            send_type;
+    MPI_Datatype            recv_type;
+    hbool_t                 send_type_derived = FALSE;
+    hbool_t                 recv_type_derived = FALSE;
+    hbool_t                 need_sort         = FALSE;
+    size_t                  collective_num_entries     = 0;
+    size_t                  num_local_chunks_processed = 0;
+    size_t                  i;
+    void *                  gathered_array     = NULL;
+    int *                   counts_disps_array = NULL;
+    int *                   counts_ptr         = NULL;
+    int *                   displacements_ptr  = NULL;
+    int                     mpi_code;
+    herr_t                  ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    HDassert(chunk_list || 0 == chunk_list_num_entries);
+    HDassert(io_info);
+    HDassert(idx_info);
+    HDassert(idx_info->storage->idx_type != H5D_CHUNK_IDX_NONE);
+
+#ifdef H5Dmpio_DEBUG
+    H5D_MPIO_TRACE_ENTER(mpi_rank);
+    H5D_MPIO_TIME_START(mpi_rank, "Reallocation of chunk file space");
+#endif
+
+    /*
+     * Make sure it's safe to cast this rank's number
+     * of chunks to be sent into an int for MPI
+     */
+    H5_CHECK_OVERFLOW(chunk_list_num_entries, size_t, int);
+
+    /* Create derived datatypes for the chunk file space info needed */
+    if (H5D__mpio_get_chunk_alloc_info_types(&recv_type, &recv_type_derived, &send_type, &send_type_derived) <
+        0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+                    "can't create derived datatypes for chunk file space info")
+
+    /*
+     * Gather the new chunk sizes to all ranks for a collective reallocation
+     * of the chunks in the file.
+     */
+    if (num_chunks_assigned_map) {
+        /*
+         * If a mapping between rank value -> number of assigned chunks has
+         * been provided (usually during linked-chunk I/O), we can use this
+         * to optimize MPI overhead a bit since MPI ranks won't need to
+         * first inform each other about how many chunks they're contributing.
+         */
+        if (NULL == (counts_disps_array = H5MM_malloc(2 * (size_t)mpi_size * sizeof(*counts_disps_array)))) {
+            /* Push an error, but still participate in collective gather operation */
+            HDONE_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                        "couldn't allocate receive counts and displacements array")
+        }
+        else {
+            /* Set the receive counts from the assigned chunks map */
+            counts_ptr = counts_disps_array;
+
+            for (i = 0; i < (size_t)mpi_size; i++)
+                H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t);
+
+            /* Set the displacements into the receive buffer for the gather operation */
+            displacements_ptr = &counts_disps_array[mpi_size];
+
+            *displacements_ptr = 0;
+            for (i = 1; i < (size_t)mpi_size; i++)
+                displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1];
+        }
+
+        /* Perform gather operation */
+        if (H5_mpio_gatherv_alloc(chunk_list, (int)chunk_list_num_entries, send_type, counts_ptr,
+                                  displacements_ptr, recv_type, TRUE, 0, io_info->comm, mpi_rank, mpi_size,
+                                  &gathered_array, &collective_num_entries) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "can't gather chunk file space info to/from ranks")
+    }
+    else {
+        /*
+         * If no mapping between rank value -> number of assigned chunks has
+         * been provided (usually during multi-chunk I/O), all MPI ranks will
+         * need to first inform other ranks about how many chunks they're
+         * contributing before performing the actual gather operation. Use
+         * the 'simple' MPI_Allgatherv wrapper for this.
+         */
+        if (H5_mpio_gatherv_alloc_simple(chunk_list, (int)chunk_list_num_entries, send_type, recv_type, TRUE,
+                                         0, io_info->comm, mpi_rank, mpi_size, &gathered_array,
+                                         &collective_num_entries) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL, "can't gather chunk file space info to/from ranks")
+    }
+
+    /* Collectively re-allocate the modified chunks (from each rank) in the file */
+    collective_list = (H5D_chunk_alloc_info_t *)gathered_array;
+    /* NOTE(review): matching local entries by walking num_local_chunks_processed
+     * forward assumes the gathered array preserves each rank's original entry
+     * order — confirm against H5_mpio_gatherv_alloc's ordering guarantees.
+     */
+    for (i = 0, num_local_chunks_processed = 0; i < collective_num_entries; i++) {
+        H5D_chunk_alloc_info_t *coll_entry = &collective_list[i];
+        hbool_t                 need_insert;
+        hbool_t                 update_local_chunk;
+
+        if (H5D__chunk_file_alloc(idx_info, &coll_entry->chunk_current, &coll_entry->chunk_new, &need_insert,
+                                  NULL) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
+
+        /*
+         * If we just re-allocated a chunk that is local to this
+         * rank, make sure to update the chunk entry in the local
+         * chunk list
+         */
+        update_local_chunk =
+            (num_local_chunks_processed < chunk_list_num_entries) &&
+            (coll_entry->chunk_idx == chunk_list[num_local_chunks_processed].index_info.chunk_idx);
+
+        if (update_local_chunk) {
+            H5D_filtered_collective_io_info_t *local_chunk;
+
+            local_chunk = &chunk_list[num_local_chunks_processed];
+
+            /* Sanity check that this chunk is actually local */
+            HDassert(mpi_rank == local_chunk->orig_owner);
+            HDassert(mpi_rank == local_chunk->new_owner);
+
+            local_chunk->chunk_new              = coll_entry->chunk_new;
+            local_chunk->index_info.need_insert = need_insert;
+
+            /*
+             * Since chunk reallocation can move chunks around, check if
+             * the local chunk list is still in ascending order of offset
+             * in the file
+             */
+            if (num_local_chunks_processed) {
+                haddr_t curr_chunk_offset = local_chunk->chunk_new.offset;
+                haddr_t prev_chunk_offset = chunk_list[num_local_chunks_processed - 1].chunk_new.offset;
+
+                HDassert(H5F_addr_defined(prev_chunk_offset) && H5F_addr_defined(curr_chunk_offset));
+                if (curr_chunk_offset < prev_chunk_offset)
+                    need_sort = TRUE;
+            }
+
+            num_local_chunks_processed++;
+        }
+    }
+
+    /* Every local chunk entry must have been matched during the collective pass */
+    HDassert(chunk_list_num_entries == num_local_chunks_processed);
+
+    /*
+     * Ensure this rank's local chunk list is sorted in
+     * ascending order of offset in the file
+     */
+    if (need_sort)
+        HDqsort(chunk_list, chunk_list_num_entries, sizeof(H5D_filtered_collective_io_info_t),
+                H5D__cmp_filtered_collective_io_info_entry);
+
+done:
+    H5MM_free(gathered_array);
+    H5MM_free(counts_disps_array);
+
+    if (send_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&send_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+    if (recv_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&recv_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+
+#ifdef H5Dmpio_DEBUG
+    H5D_MPIO_TIME_STOP(mpi_rank);
+    H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__mpio_collective_filtered_chunk_reallocate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_collective_filtered_chunk_reinsert
+ *
+ * Purpose: When performing a parallel write on a chunked dataset with
+ * filters applied, all ranks must eventually get together and
+ * perform a collective reinsertion into the dataset's chunk
+ * index of chunks that were modified. This routine is
+ * responsible for coordinating that process.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_collective_filtered_chunk_reinsert(H5D_filtered_collective_io_info_t *chunk_list,
+                                             size_t chunk_list_num_entries, size_t *num_chunks_assigned_map,
+                                             H5D_io_info_t *io_info, H5D_chk_idx_info_t *idx_info,
+                                             int mpi_rank, int mpi_size)
+{
+    H5D_chunk_ud_t chunk_ud;
+    MPI_Datatype   send_type;
+    MPI_Datatype   recv_type;
+    hbool_t        send_type_derived = FALSE;
+    hbool_t        recv_type_derived = FALSE;
+    hsize_t        scaled_coords[H5O_LAYOUT_NDIMS];
+    size_t         collective_num_entries = 0;
+    size_t         i;
+    void *         gathered_array     = NULL;
+    int *          counts_disps_array = NULL;
+    int *          counts_ptr         = NULL;
+    int *          displacements_ptr  = NULL;
+    int            mpi_code;
+    herr_t         ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    HDassert(chunk_list || 0 == chunk_list_num_entries);
+    HDassert(io_info);
+    HDassert(idx_info);
+
+#ifdef H5Dmpio_DEBUG
+    H5D_MPIO_TRACE_ENTER(mpi_rank);
+    H5D_MPIO_TIME_START(mpi_rank, "Reinsertion of modified chunks into chunk index");
+#endif
+
+    /* Only re-insert chunks if index has an insert method */
+    if (!idx_info->storage->ops->insert)
+        HGOTO_DONE(SUCCEED);
+
+    /*
+     * Make sure it's safe to cast this rank's number
+     * of chunks to be sent into an int for MPI
+     */
+    H5_CHECK_OVERFLOW(chunk_list_num_entries, size_t, int);
+
+    /* Create derived datatypes for the chunk re-insertion info needed */
+    if (H5D__mpio_get_chunk_insert_info_types(&recv_type, &recv_type_derived, &send_type,
+                                              &send_type_derived) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL,
+                    "can't create derived datatypes for chunk re-insertion info")
+
+    /*
+     * Gather information to all ranks for a collective re-insertion
+     * of the modified chunks into the chunk index
+     */
+    if (num_chunks_assigned_map) {
+        /*
+         * If a mapping between rank value -> number of assigned chunks has
+         * been provided (usually during linked-chunk I/O), we can use this
+         * to optimize MPI overhead a bit since MPI ranks won't need to
+         * first inform each other about how many chunks they're contributing.
+         */
+        if (NULL == (counts_disps_array = H5MM_malloc(2 * (size_t)mpi_size * sizeof(*counts_disps_array)))) {
+            /* Push an error, but still participate in collective gather operation */
+            HDONE_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                        "couldn't allocate receive counts and displacements array")
+        }
+        else {
+            /* Set the receive counts from the assigned chunks map */
+            counts_ptr = counts_disps_array;
+
+            for (i = 0; i < (size_t)mpi_size; i++)
+                H5_CHECKED_ASSIGN(counts_ptr[i], int, num_chunks_assigned_map[i], size_t);
+
+            /* Set the displacements into the receive buffer for the gather operation */
+            displacements_ptr = &counts_disps_array[mpi_size];
+
+            *displacements_ptr = 0;
+            for (i = 1; i < (size_t)mpi_size; i++)
+                displacements_ptr[i] = displacements_ptr[i - 1] + counts_ptr[i - 1];
+        }
+
+        /* Perform gather operation */
+        if (H5_mpio_gatherv_alloc(chunk_list, (int)chunk_list_num_entries, send_type, counts_ptr,
+                                  displacements_ptr, recv_type, TRUE, 0, io_info->comm, mpi_rank, mpi_size,
+                                  &gathered_array, &collective_num_entries) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
+                        "can't gather chunk index re-insertion info to/from ranks")
+    }
+    else {
+        /*
+         * If no mapping between rank value -> number of assigned chunks has
+         * been provided (usually during multi-chunk I/O), all MPI ranks will
+         * need to first inform other ranks about how many chunks they're
+         * contributing before performing the actual gather operation. Use
+         * the 'simple' MPI_Allgatherv wrapper for this.
+         */
+        if (H5_mpio_gatherv_alloc_simple(chunk_list, (int)chunk_list_num_entries, send_type, recv_type, TRUE,
+                                         0, io_info->comm, mpi_rank, mpi_size, &gathered_array,
+                                         &collective_num_entries) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTGATHER, FAIL,
+                        "can't gather chunk index re-insertion info to/from ranks")
+    }
+
+    /* Initialize static chunk udata fields from chunk index info */
+    H5D_MPIO_INIT_CHUNK_UD_INFO(chunk_ud, idx_info);
+
+    for (i = 0; i < collective_num_entries; i++) {
+        H5D_chunk_insert_info_t *coll_entry = &((H5D_chunk_insert_info_t *)gathered_array)[i];
+
+        /*
+         * We only need to reinsert this chunk if we had to actually
+         * allocate or reallocate space in the file for it
+         */
+        if (!coll_entry->index_info.need_insert)
+            continue;
+
+        chunk_ud.chunk_block   = coll_entry->chunk_block;
+        chunk_ud.chunk_idx     = coll_entry->index_info.chunk_idx;
+        chunk_ud.filter_mask   = coll_entry->index_info.filter_mask;
+        /* scaled_coords is recomputed below for each entry; the udata just
+         * points at this per-iteration scratch buffer */
+        chunk_ud.common.scaled = scaled_coords;
+
+        /* Calculate scaled coordinates for the chunk */
+        if (idx_info->layout->idx_type == H5D_CHUNK_IDX_EARRAY && idx_info->layout->u.earray.unlim_dim > 0) {
+            /*
+             * Extensible arrays where the unlimited dimension is not
+             * the slowest-changing dimension "swizzle" the coordinates
+             * to move the unlimited dimension value to offset 0. Therefore,
+             * we use the "swizzled" down chunks to calculate the "swizzled"
+             * scaled coordinates and then we undo the "swizzle" operation.
+             *
+             * TODO: In the future, this is something that should be handled
+             *       by the particular chunk index rather than manually
+             *       here. Likely, the chunk index ops should get a new
+             *       callback that accepts a chunk index and provides the
+             *       caller with the scaled coordinates for that chunk.
+             */
+            H5VM_array_calc_pre(chunk_ud.chunk_idx, io_info->dset->shared->ndims,
+                                idx_info->layout->u.earray.swizzled_down_chunks, scaled_coords);
+
+            H5VM_unswizzle_coords(hsize_t, scaled_coords, idx_info->layout->u.earray.unlim_dim);
+        }
+        else {
+            H5VM_array_calc_pre(chunk_ud.chunk_idx, io_info->dset->shared->ndims,
+                                io_info->dset->shared->layout.u.chunk.down_chunks, scaled_coords);
+        }
+
+        /* NOTE(review): zero the extra (ndims-th) scaled coordinate slot —
+         * presumably required by the chunk index insert callback; confirm. */
+        scaled_coords[io_info->dset->shared->ndims] = 0;
+
+#ifndef NDEBUG
+        /*
+         * If a matching local chunk entry is found, the
+         * `chunk_info` structure (which contains the chunk's
+         * pre-computed scaled coordinates) will be valid
+         * for this rank. Compare those coordinates against
+         * the calculated coordinates above to make sure
+         * they match.
+         */
+        for (size_t dbg_idx = 0; dbg_idx < chunk_list_num_entries; dbg_idx++) {
+            if (coll_entry->index_info.chunk_idx == chunk_list[dbg_idx].index_info.chunk_idx) {
+                hbool_t coords_match = !HDmemcmp(scaled_coords, chunk_list[dbg_idx].chunk_info->scaled,
+                                                 io_info->dset->shared->ndims * sizeof(hsize_t));
+
+                HDassert(coords_match && "Calculated scaled coordinates for chunk didn't match "
+                                         "chunk's actual scaled coordinates!");
+                break;
+            }
+        }
+#endif
+
+        if ((idx_info->storage->ops->insert)(idx_info, &chunk_ud, io_info->dset) < 0)
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk address into index")
+    }
+
+done:
+    H5MM_free(gathered_array);
+    H5MM_free(counts_disps_array);
+
+    if (send_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&send_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+    if (recv_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&recv_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+
+#ifdef H5Dmpio_DEBUG
+    H5D_MPIO_TIME_STOP(mpi_rank);
+    H5D_MPIO_TRACE_EXIT(mpi_rank);
+#endif
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_collective_filtered_chunk_reinsert() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_get_chunk_redistribute_info_types
+ *
+ * Purpose: Constructs MPI derived datatypes for communicating the
+ * info from a H5D_filtered_collective_io_info_t structure
+ * that is necessary for redistributing shared chunks during a
+ * collective write of filtered chunks.
+ *
+ * The datatype returned through `contig_type` has an extent
+ * equal to the size of an H5D_chunk_redistribute_info_t
+ * structure and is suitable for communicating that structure
+ * type.
+ *
+ * The datatype returned through `resized_type` has an extent
+ * equal to the size of an H5D_filtered_collective_io_info_t
+ * structure. This makes it suitable for sending an array of
+ * those structures, while extracting out just the info
+ * necessary for the chunk redistribution operation during
+ * communication.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_get_chunk_redistribute_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
+                                            MPI_Datatype *resized_type, hbool_t *resized_type_derived)
+{
+    MPI_Datatype struct_type              = MPI_DATATYPE_NULL;
+    hbool_t      struct_type_derived      = FALSE;
+    MPI_Datatype chunk_block_type         = MPI_DATATYPE_NULL;
+    hbool_t      chunk_block_type_derived = FALSE;
+    MPI_Datatype types[5];
+    MPI_Aint     displacements[5];
+    int          block_lengths[5];
+    int          field_count;
+    int          mpi_code;
+    herr_t       ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    HDassert(contig_type);
+    HDassert(contig_type_derived);
+    HDassert(resized_type);
+    HDassert(resized_type_derived);
+
+    *contig_type_derived  = FALSE;
+    *resized_type_derived = FALSE;
+
+    /* Create struct type for the inner H5F_block_t structure */
+    if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+
+    field_count = 5;
+    HDassert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
+
+    /*
+     * Create structure type to pack chunk H5F_block_t structure
+     * next to chunk_idx, orig_owner, new_owner and num_writers
+     * fields
+     */
+    block_lengths[0] = 1;
+    block_lengths[1] = 1;
+    block_lengths[2] = 1;
+    block_lengths[3] = 1;
+    block_lengths[4] = 1;
+    displacements[0] = offsetof(H5D_chunk_redistribute_info_t, chunk_block);
+    displacements[1] = offsetof(H5D_chunk_redistribute_info_t, chunk_idx);
+    displacements[2] = offsetof(H5D_chunk_redistribute_info_t, orig_owner);
+    displacements[3] = offsetof(H5D_chunk_redistribute_info_t, new_owner);
+    displacements[4] = offsetof(H5D_chunk_redistribute_info_t, num_writers);
+    types[0]         = chunk_block_type;
+    types[1]         = HSIZE_AS_MPI_TYPE;
+    types[2]         = MPI_INT;
+    types[3]         = MPI_INT;
+    types[4]         = MPI_INT;
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    *contig_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+    /* Create struct type to extract the chunk_current, chunk_idx, orig_owner,
+     * new_owner and num_writers fields from a H5D_filtered_collective_io_info_t
+     * structure
+     */
+    block_lengths[0] = 1;
+    block_lengths[1] = 1;
+    block_lengths[2] = 1;
+    block_lengths[3] = 1;
+    block_lengths[4] = 1;
+    displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_current);
+    displacements[1] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx);
+    displacements[2] = offsetof(H5D_filtered_collective_io_info_t, orig_owner);
+    displacements[3] = offsetof(H5D_filtered_collective_io_info_t, new_owner);
+    displacements[4] = offsetof(H5D_filtered_collective_io_info_t, num_writers);
+    types[0]         = chunk_block_type;
+    types[1]         = HSIZE_AS_MPI_TYPE;
+    types[2]         = MPI_INT;
+    types[3]         = MPI_INT;
+    types[4]         = MPI_INT;
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    struct_type_derived = TRUE;
+
+    /* Resize the extraction type to the full size of the encompassing structure
+     * so that consecutive array elements are picked up at the right stride */
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized(
+                            struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code)
+    *resized_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+done:
+    if (struct_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&struct_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+    if (chunk_block_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_block_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+
+    /* On failure, free the output types so callers don't receive half-built types */
+    if (ret_value < 0) {
+        if (*resized_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(resized_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *resized_type_derived = FALSE;
+        }
+        if (*contig_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(contig_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *contig_type_derived = FALSE;
+        }
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_get_chunk_redistribute_info_types() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_get_chunk_alloc_info_types
+ *
+ * Purpose: Constructs MPI derived datatypes for communicating the info
+ * from a H5D_filtered_collective_io_info_t structure that is
+ * necessary for re-allocating file space during a collective
+ * write of filtered chunks.
+ *
+ * The datatype returned through `contig_type` has an extent
+ * equal to the size of an H5D_chunk_alloc_info_t structure
+ * and is suitable for communicating that structure type.
+ *
+ * The datatype returned through `resized_type` has an extent
+ * equal to the size of an H5D_filtered_collective_io_info_t
+ * structure. This makes it suitable for sending an array of
+ * those structures, while extracting out just the info
+ * necessary for the chunk file space reallocation operation
+ * during communication.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_get_chunk_alloc_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
+                                     MPI_Datatype *resized_type, hbool_t *resized_type_derived)
+{
+    MPI_Datatype struct_type              = MPI_DATATYPE_NULL;
+    hbool_t      struct_type_derived      = FALSE;
+    MPI_Datatype chunk_block_type         = MPI_DATATYPE_NULL;
+    hbool_t      chunk_block_type_derived = FALSE;
+    MPI_Datatype types[3];
+    MPI_Aint     displacements[3];
+    int          block_lengths[3];
+    int          field_count;
+    int          mpi_code;
+    herr_t       ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    HDassert(contig_type);
+    HDassert(contig_type_derived);
+    HDassert(resized_type);
+    HDassert(resized_type_derived);
+
+    *contig_type_derived  = FALSE;
+    *resized_type_derived = FALSE;
+
+    /* Create struct type for the inner H5F_block_t structure */
+    if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+
+    field_count = 3;
+    HDassert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
+
+    /*
+     * Create structure type to pack both chunk H5F_block_t structures
+     * next to chunk_idx field
+     */
+    block_lengths[0] = 1;
+    block_lengths[1] = 1;
+    block_lengths[2] = 1;
+    displacements[0] = offsetof(H5D_chunk_alloc_info_t, chunk_current);
+    displacements[1] = offsetof(H5D_chunk_alloc_info_t, chunk_new);
+    displacements[2] = offsetof(H5D_chunk_alloc_info_t, chunk_idx);
+    types[0]         = chunk_block_type;
+    types[1]         = chunk_block_type;
+    types[2]         = HSIZE_AS_MPI_TYPE;
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    *contig_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+    /*
+     * Create struct type to extract the chunk_current, chunk_new and chunk_idx
+     * fields from a H5D_filtered_collective_io_info_t structure
+     */
+    block_lengths[0] = 1;
+    block_lengths[1] = 1;
+    block_lengths[2] = 1;
+    displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_current);
+    displacements[1] = offsetof(H5D_filtered_collective_io_info_t, chunk_new);
+    displacements[2] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx);
+    types[0]         = chunk_block_type;
+    types[1]         = chunk_block_type;
+    types[2]         = HSIZE_AS_MPI_TYPE;
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    struct_type_derived = TRUE;
+
+    /* Resize the extraction type to the full size of the encompassing structure
+     * so that consecutive array elements are picked up at the right stride */
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized(
+                            struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code)
+    *resized_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+done:
+    if (struct_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&struct_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+    if (chunk_block_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_block_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+
+    /* On failure, free the output types so callers don't receive half-built types */
+    if (ret_value < 0) {
+        if (*resized_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(resized_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *resized_type_derived = FALSE;
+        }
+        if (*contig_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(contig_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *contig_type_derived = FALSE;
+        }
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_get_chunk_alloc_info_types() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_get_chunk_insert_info_types
+ *
+ * Purpose: Constructs MPI derived datatypes for communicating the
+ * information necessary when reinserting chunks into a
+ * dataset's chunk index. This includes the chunk's new offset
+ * and size (H5F_block_t) and the inner `index_info` structure
+ * of a H5D_filtered_collective_io_info_t structure.
+ *
+ * The datatype returned through `contig_type` has an extent
+ * equal to the size of an H5D_chunk_insert_info_t structure
+ * and is suitable for communicating that structure type.
+ *
+ * The datatype returned through `resized_type` has an extent
+ * equal to the size of the encompassing
+ * H5D_filtered_collective_io_info_t structure. This makes it
+ * suitable for sending an array of
+ * H5D_filtered_collective_io_info_t structures, while
+ * extracting out just the information needed during
+ * communication.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_get_chunk_insert_info_types(MPI_Datatype *contig_type, hbool_t *contig_type_derived,
+                                      MPI_Datatype *resized_type, hbool_t *resized_type_derived)
+{
+    MPI_Datatype struct_type              = MPI_DATATYPE_NULL;
+    hbool_t      struct_type_derived      = FALSE;
+    MPI_Datatype chunk_block_type         = MPI_DATATYPE_NULL;
+    hbool_t      chunk_block_type_derived = FALSE;
+    MPI_Aint     contig_type_extent;
+    MPI_Datatype types[4];
+    MPI_Aint     displacements[4];
+    int          block_lengths[4];
+    int          field_count;
+    int          mpi_code;
+    herr_t       ret_value = SUCCEED;
+
+    FUNC_ENTER_STATIC
+
+    HDassert(contig_type);
+    HDassert(contig_type_derived);
+    HDassert(resized_type);
+    HDassert(resized_type_derived);
+
+    *contig_type_derived  = FALSE;
+    *resized_type_derived = FALSE;
+
+    /* Create struct type for an H5F_block_t structure */
+    if (H5F_mpi_get_file_block_type(FALSE, &chunk_block_type, &chunk_block_type_derived) < 0)
+        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't create derived type for chunk file description")
+
+    field_count = 4;
+    HDassert(field_count == (sizeof(types) / sizeof(MPI_Datatype)));
+
+    /*
+     * Create struct type to pack information into memory as follows:
+     *
+     * Chunk's new Offset/Size (H5F_block_t) ->
+     * Chunk Index Info (H5D_chunk_index_info_t)
+     */
+    block_lengths[0] = 1;
+    block_lengths[1] = 1;
+    block_lengths[2] = 1;
+    block_lengths[3] = 1;
+    displacements[0] = offsetof(H5D_chunk_insert_info_t, chunk_block);
+    displacements[1] = offsetof(H5D_chunk_insert_info_t, index_info.chunk_idx);
+    displacements[2] = offsetof(H5D_chunk_insert_info_t, index_info.filter_mask);
+    displacements[3] = offsetof(H5D_chunk_insert_info_t, index_info.need_insert);
+    types[0]         = chunk_block_type;
+    types[1]         = HSIZE_AS_MPI_TYPE;
+    types[2]         = MPI_UNSIGNED;
+    types[3]         = MPI_C_BOOL;
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    struct_type_derived = TRUE;
+
+    /* NOTE(review): this extent assumes H5D_chunk_insert_info_t has no padding
+     * between its H5F_block_t and H5D_chunk_index_info_t members — confirm
+     * against the structure's declaration. */
+    contig_type_extent = (MPI_Aint)(sizeof(H5F_block_t) + sizeof(H5D_chunk_index_info_t));
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized(struct_type, 0, contig_type_extent, contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code)
+    *contig_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(contig_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+    /* Free the intermediate struct type so it can be re-created below for the
+     * extraction type; block_lengths/types carry over unchanged */
+    struct_type_derived = FALSE;
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&struct_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+
+    /*
+     * Create struct type to correctly extract all needed
+     * information from a H5D_filtered_collective_io_info_t
+     * structure.
+     */
+    displacements[0] = offsetof(H5D_filtered_collective_io_info_t, chunk_new);
+    displacements[1] = offsetof(H5D_filtered_collective_io_info_t, index_info.chunk_idx);
+    displacements[2] = offsetof(H5D_filtered_collective_io_info_t, index_info.filter_mask);
+    displacements[3] = offsetof(H5D_filtered_collective_io_info_t, index_info.need_insert);
+    if (MPI_SUCCESS !=
+        (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, &struct_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+    struct_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_create_resized(
+                            struct_type, 0, sizeof(H5D_filtered_collective_io_info_t), resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_resized failed", mpi_code)
+    *resized_type_derived = TRUE;
+
+    if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(resized_type)))
+        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
done:
-    if (chunk_entry->async_info.receive_buffer_array)
-        H5MM_free(chunk_entry->async_info.receive_buffer_array);
-    if (chunk_entry->async_info.receive_requests_array)
-        H5MM_free(chunk_entry->async_info.receive_requests_array);
-    if (tmp_gath_buf)
-        H5MM_free(tmp_gath_buf);
-    if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0)
-        HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
-    if (file_iter)
-        H5MM_free(file_iter);
-    if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
-        HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "couldn't release selection iterator")
-    if (mem_iter)
-        H5MM_free(mem_iter);
-    if (dataspace)
-        if (H5S_close(dataspace) < 0)
-            HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "can't close dataspace")
+    if (struct_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&struct_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+    if (chunk_block_type_derived) {
+        if (MPI_SUCCESS != (mpi_code = MPI_Type_free(&chunk_block_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    }
+
+    /* On failure, free the output types so callers don't receive half-built types */
+    if (ret_value < 0) {
+        if (*resized_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(resized_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *resized_type_derived = FALSE;
+        }
+        if (*contig_type_derived) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_free(contig_type)))
+                HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+            *contig_type_derived = FALSE;
+        }
+    }
    FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5D__filtered_collective_chunk_entry_io() */
+} /* end H5D__mpio_get_chunk_insert_info_types() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__mpio_collective_filtered_io_type
+ *
+ * Purpose:     Constructs an MPI derived datatype for both the memory and
+ * the file for a collective I/O operation on filtered chunks.
+ * The datatype contains the chunk offsets and lengths in the
+ * file and the locations of the chunk data buffers to read
+ * into/write from.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5D__mpio_collective_filtered_io_type(H5D_filtered_collective_io_info_t *chunk_list, size_t num_entries,
+ H5D_io_op_type_t op_type, MPI_Datatype *new_mem_type,
+ hbool_t *mem_type_derived, MPI_Datatype *new_file_type,
+ hbool_t *file_type_derived)
+{
+ MPI_Aint *io_buf_array = NULL; /* Relative displacements of filtered chunk data buffers */
+ MPI_Aint *file_offset_array = NULL; /* Chunk offsets in the file */
+ int * length_array = NULL; /* Filtered Chunk lengths */
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(chunk_list || 0 == num_entries);
+ HDassert(new_mem_type);
+ HDassert(mem_type_derived);
+ HDassert(new_file_type);
+ HDassert(file_type_derived);
+
+ *mem_type_derived = FALSE;
+ *file_type_derived = FALSE;
+ *new_mem_type = MPI_BYTE;
+ *new_file_type = MPI_BYTE;
+
+ if (num_entries > 0) {
+ H5F_block_t *chunk_block;
+ size_t last_valid_idx = 0;
+ size_t i;
+ int chunk_count;
+
+ /*
+ * Determine number of chunks for I/O operation and
+ * setup for derived datatype creation if I/O operation
+ * includes multiple chunks
+ */
+ if (num_entries == 1) {
+ /* Set last valid index to 0 for contiguous datatype creation */
+ last_valid_idx = 0;
+
+ if (op_type == H5D_IO_OP_WRITE)
+ chunk_count = 1;
+ else
+ chunk_count = chunk_list[0].need_read ? 1 : 0;
+ }
+ else {
+ MPI_Aint chunk_buf;
+ MPI_Aint base_buf;
+ haddr_t base_offset = HADDR_UNDEF;
+
+ H5_CHECK_OVERFLOW(num_entries, size_t, int);
+
+ /* Allocate arrays */
+ if (NULL == (length_array = H5MM_malloc((size_t)num_entries * sizeof(int))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for filtered collective I/O length array")
+ if (NULL == (io_buf_array = H5MM_malloc((size_t)num_entries * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for filtered collective I/O buf length array")
+ if (NULL == (file_offset_array = H5MM_malloc((size_t)num_entries * sizeof(MPI_Aint))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for filtered collective I/O offset array")
+
+ /*
+ * If doing a write, we can set the base chunk offset
+ * and base chunk data buffer right away.
+ *
+ * If doing a read, some chunks may be skipped over
+ * for reading if they aren't yet allocated in the
+ * file. Therefore, we have to find the first chunk
+ * actually being read in order to set the base chunk
+ * offset and base chunk data buffer.
+ */
+ if (op_type == H5D_IO_OP_WRITE) {
+#if H5_CHECK_MPI_VERSION(3, 0)
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(chunk_list[0].buf, &base_buf)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address failed", mpi_code)
+#else
+ base_buf = (MPI_Aint)chunk_list[0].buf;
+#endif
+
+ base_offset = chunk_list[0].chunk_new.offset;
+ }
+
+ for (i = 0, chunk_count = 0; i < num_entries; i++) {
+ if (op_type == H5D_IO_OP_READ) {
+ /*
+ * If this chunk isn't being read, don't add it
+ * to the MPI type we're building up for I/O
+ */
+ if (!chunk_list[i].need_read)
+ continue;
+
+ /*
+ * If this chunk is being read, go ahead and
+ * set the base chunk offset and base chunk
+ * data buffer if we haven't already
+ */
+ if (!H5F_addr_defined(base_offset)) {
+#if H5_CHECK_MPI_VERSION(3, 0)
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(chunk_list[i].buf, &base_buf)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address failed", mpi_code)
+#else
+ base_buf = (MPI_Aint)chunk_list[i].buf;
+#endif
+
+ base_offset = chunk_list[i].chunk_current.offset;
+ }
+ }
+
+ /* Set convenience pointer for current chunk block */
+ chunk_block =
+ (op_type == H5D_IO_OP_READ) ? &chunk_list[i].chunk_current : &chunk_list[i].chunk_new;
+
+ /*
+ * Set the current chunk entry's offset in the file, relative to
+ * the first chunk entry
+ */
+ HDassert(H5F_addr_defined(chunk_block->offset));
+ file_offset_array[chunk_count] = (MPI_Aint)(chunk_block->offset - base_offset);
+
+ /*
+ * Ensure the chunk list is sorted in ascending ordering of
+ * offset in the file
+ */
+ if (chunk_count)
+ HDassert(file_offset_array[chunk_count] > file_offset_array[chunk_count - 1]);
+
+ /* Set the current chunk entry's size for the I/O operation */
+ H5_CHECK_OVERFLOW(chunk_block->length, hsize_t, int);
+ length_array[chunk_count] = (int)chunk_block->length;
+
+ /*
+ * Set the displacement of the chunk entry's chunk data buffer,
+ * relative to the first entry's data buffer
+ */
+#if H5_CHECK_MPI_VERSION(3, 1)
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(chunk_list[i].buf, &chunk_buf)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address failed", mpi_code)
+
+ io_buf_array[chunk_count] = MPI_Aint_diff(chunk_buf, base_buf);
+#else
+ chunk_buf = (MPI_Aint)chunk_list[i].buf;
+ io_buf_array[chunk_count] = chunk_buf - base_buf;
+#endif
+
+ /*
+ * Set last valid index in case only a single chunk will
+ * be involved in the I/O operation
+ */
+ last_valid_idx = i;
+
+ chunk_count++;
+ } /* end for */
+ }
+
+ /*
+ * Create derived datatypes for the chunk list if this
+ * rank has any chunks to work on
+ */
+ if (chunk_count > 0) {
+ if (chunk_count == 1) {
+ int chunk_len;
+
+ /* Single chunk - use a contiguous type for both memory and file */
+
+ /* Ensure that we can cast chunk size to an int for MPI */
+ chunk_block = (op_type == H5D_IO_OP_READ) ? &chunk_list[last_valid_idx].chunk_current
+ : &chunk_list[last_valid_idx].chunk_new;
+ H5_CHECKED_ASSIGN(chunk_len, int, chunk_block->length, hsize_t);
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(chunk_len, MPI_BYTE, new_file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ *new_mem_type = *new_file_type;
+
+ /*
+ * Since we use the same datatype for both memory and file, only
+ * mark the file type as derived so the caller doesn't try to
+ * free the same type twice
+ */
+ *mem_type_derived = FALSE;
+ *file_type_derived = TRUE;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ }
+ else {
+ HDassert(file_offset_array);
+ HDassert(length_array);
+ HDassert(io_buf_array);
+
+ /* Multiple chunks - use an hindexed type for both memory and file */
+
+ /* Create memory MPI type */
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(
+ chunk_count, length_array, io_buf_array, MPI_BYTE, new_mem_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+ *mem_type_derived = TRUE;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_mem_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+ /* Create file MPI type */
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_hindexed(chunk_count, length_array, file_offset_array,
+ MPI_BYTE, new_file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+ *file_type_derived = TRUE;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ }
+ }
+ } /* end if */
+
+done:
+ if (file_offset_array)
+ H5MM_free(file_offset_array);
+ if (io_buf_array)
+ H5MM_free(io_buf_array);
+ if (length_array)
+ H5MM_free(length_array);
+
+ if (ret_value < 0) {
+ if (*file_type_derived) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(new_file_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ *file_type_derived = FALSE;
+ }
+ if (*mem_type_derived) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(new_mem_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ *mem_type_derived = FALSE;
+ }
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_collective_filtered_io_type() */
+
+#ifdef H5Dmpio_DEBUG
+
+static herr_t
+H5D__mpio_dump_collective_filtered_chunk_list(H5D_filtered_collective_io_info_t *chunk_list,
+ size_t chunk_list_num_entries, int mpi_rank)
+{
+ H5D_filtered_collective_io_info_t *chunk_entry;
+ size_t i;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ H5D_MPIO_DEBUG(mpi_rank, "CHUNK LIST: [");
+ for (i = 0; i < chunk_list_num_entries; i++) {
+ unsigned chunk_rank;
+
+ chunk_entry = &chunk_list[i];
+
+ HDassert(chunk_entry->chunk_info);
+ chunk_rank = (unsigned)H5S_GET_EXTENT_NDIMS(chunk_entry->chunk_info->fspace);
+
+ H5D_MPIO_DEBUG(mpi_rank, " {");
+ H5D_MPIO_DEBUG_VA(mpi_rank, " - Entry %zu -", i);
+
+ H5D_MPIO_DEBUG(mpi_rank, " - Chunk Fspace Info -");
+ H5D_MPIO_DEBUG_VA(mpi_rank,
+ " Chunk Current Info: { Offset: %" PRIuHADDR ", Length: %" PRIuHADDR " }",
+ chunk_entry->chunk_current.offset, chunk_entry->chunk_current.length);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk New Info: { Offset: %" PRIuHADDR ", Length: %" PRIuHADDR " }",
+ chunk_entry->chunk_new.offset, chunk_entry->chunk_new.length);
+
+ H5D_MPIO_DEBUG(mpi_rank, " - Chunk Insert Info -");
+ H5D_MPIO_DEBUG_VA(mpi_rank,
+ " Chunk Scaled Coords (4-d): { %" PRIuHSIZE ", %" PRIuHSIZE ", %" PRIuHSIZE
+ ", %" PRIuHSIZE " }",
+ chunk_rank < 1 ? 0 : chunk_entry->chunk_info->scaled[0],
+ chunk_rank < 2 ? 0 : chunk_entry->chunk_info->scaled[1],
+ chunk_rank < 3 ? 0 : chunk_entry->chunk_info->scaled[2],
+ chunk_rank < 4 ? 0 : chunk_entry->chunk_info->scaled[3]);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk Index: %" PRIuHSIZE, chunk_entry->index_info.chunk_idx);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Filter Mask: %u", chunk_entry->index_info.filter_mask);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Need Insert: %s",
+ chunk_entry->index_info.need_insert ? "YES" : "NO");
+
+ H5D_MPIO_DEBUG(mpi_rank, " - Other Info -");
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk Info Ptr: %p", (void *)chunk_entry->chunk_info);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Need Read: %s", chunk_entry->need_read ? "YES" : "NO");
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk I/O Size: %zu", chunk_entry->io_size);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk Buffer Size: %zu", chunk_entry->chunk_buf_size);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Original Owner: %d", chunk_entry->orig_owner);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " New Owner: %d", chunk_entry->new_owner);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " # of Writers: %d", chunk_entry->num_writers);
+ H5D_MPIO_DEBUG_VA(mpi_rank, " Chunk Data Buffer Ptr: %p", (void *)chunk_entry->buf);
+
+ H5D_MPIO_DEBUG(mpi_rank, " }");
+ }
+ H5D_MPIO_DEBUG(mpi_rank, "]");
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__mpio_dump_collective_filtered_chunk_list() */
+
+#endif
+
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index 49c95a5..0e0eb08 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -121,9 +121,9 @@ typedef herr_t (*H5D_layout_construct_func_t)(H5F_t *f, H5D_t *dset);
typedef herr_t (*H5D_layout_init_func_t)(H5F_t *f, const H5D_t *dset, hid_t dapl_id);
typedef hbool_t (*H5D_layout_is_space_alloc_func_t)(const H5O_storage_t *storage);
typedef hbool_t (*H5D_layout_is_data_cached_func_t)(const H5D_shared_t *shared_dset);
-typedef herr_t (*H5D_layout_io_init_func_t)(const struct H5D_io_info_t *io_info,
- const H5D_type_info_t *type_info, hsize_t nelmts,
- H5S_t *file_space, H5S_t *mem_space, struct H5D_chunk_map_t *cm);
+typedef herr_t (*H5D_layout_io_init_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
+ hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space,
+ struct H5D_chunk_map_t *cm);
typedef herr_t (*H5D_layout_read_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space,
struct H5D_chunk_map_t *fm);
@@ -222,6 +222,7 @@ typedef struct H5D_io_info_t {
H5D_layout_ops_t layout_ops; /* Dataset layout I/O operation function pointers */
H5D_io_ops_t io_ops; /* I/O operation function pointers */
H5D_io_op_type_t op_type;
+ hbool_t use_select_io; /* Whether to use selection I/O */
union {
void * rbuf; /* Pointer to buffer for read */
const void *wbuf; /* Pointer to buffer to write */
@@ -559,6 +560,7 @@ H5_DLL herr_t H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t
hbool_t full_overwrite, hsize_t old_dim[]);
H5_DLL herr_t H5D__get_storage_size(const H5D_t *dset, hsize_t *storage_size);
H5_DLL herr_t H5D__get_chunk_storage_size(H5D_t *dset, const hsize_t *offset, hsize_t *storage_size);
+H5_DLL herr_t H5D__chunk_index_empty(const H5D_t *dset, hbool_t *empty);
H5_DLL herr_t H5D__get_num_chunks(const H5D_t *dset, const H5S_t *space, hsize_t *nchunks);
H5_DLL herr_t H5D__get_chunk_info(const H5D_t *dset, const H5S_t *space, hsize_t chk_idx, hsize_t *coord,
unsigned *filter_mask, haddr_t *offset, hsize_t *size);
@@ -591,6 +593,10 @@ H5_DLL herr_t H5D__select_read(const H5D_io_info_t *io_info, const H5D_type_info
H5_DLL herr_t H5D__select_write(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space);
+/* Functions that perform direct copying between memory buffers */
+H5_DLL herr_t H5D_select_io_mem(void *dst_buf, const H5S_t *dst_space, const void *src_buf,
+ const H5S_t *src_space, size_t elmt_size, size_t nelmts);
+
/* Functions that perform scatter-gather serial I/O operations */
H5_DLL herr_t H5D__scatter_mem(const void *_tscat_buf, H5S_sel_iter_t *iter, size_t nelmts, void *_buf);
H5_DLL size_t H5D__gather_mem(const void *_buf, H5S_sel_iter_t *iter, size_t nelmts,
@@ -635,7 +641,13 @@ H5_DLL herr_t H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_ov
const hsize_t old_dim[]);
H5_DLL herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
H5F_block_t *new_chunk, hbool_t *need_insert, const hsize_t *scaled);
+H5_DLL void * H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline);
+H5_DLL void H5D__chunk_mem_free(void *chk, const void *_pline);
+H5_DLL void * H5D__chunk_mem_xfree(void *chk, const void *pline);
+H5_DLL void * H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline);
H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hsize_t old_dim[]);
+H5_DLL hbool_t H5D__chunk_is_partial_edge_chunk(unsigned dset_ndims, const uint32_t *chunk_dims,
+ const hsize_t *chunk_scaled, const hsize_t *dset_dims);
H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, const hsize_t *old_dim);
H5_DLL herr_t H5D__chunk_set_sizes(H5D_t *dset);
#ifdef H5_HAVE_PARALLEL
@@ -694,11 +706,11 @@ H5_DLL herr_t H5D__fill_term(H5D_fill_buf_info_t *fb_info);
#ifdef H5_HAVE_PARALLEL
-#ifdef H5S_DEBUG
+#ifdef H5D_DEBUG
#ifndef H5Dmpio_DEBUG
#define H5Dmpio_DEBUG
#endif /*H5Dmpio_DEBUG*/
-#endif /*H5S_DEBUG*/
+#endif /*H5D_DEBUG*/
/* MPI-IO function to read, it will select either regular or irregular read */
H5_DLL herr_t H5D__mpio_select_read(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space);
@@ -727,6 +739,8 @@ H5_DLL herr_t H5D__chunk_collective_write(H5D_io_info_t *io_info, const H5D_type
* memory and the file */
H5_DLL htri_t H5D__mpio_opt_possible(const H5D_io_info_t *io_info, const H5S_t *file_space,
const H5S_t *mem_space, const H5D_type_info_t *type_info);
+H5_DLL herr_t H5D__mpio_get_no_coll_cause_strings(char *local_cause, size_t local_cause_len,
+ char *global_cause, size_t global_cause_len);
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index 75f4b95..02644ed 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -77,7 +77,7 @@ typedef enum H5D_chunk_index_t {
*/
typedef enum H5D_alloc_time_t {
H5D_ALLOC_TIME_ERROR = -1, /**< Error */
- H5D_ALLOC_TIME_DEFAULT = 0, /**< \todo Define this! */
+ H5D_ALLOC_TIME_DEFAULT = 0, /**< Default (layout dependent) */
H5D_ALLOC_TIME_EARLY = 1, /**< Allocate on creation */
H5D_ALLOC_TIME_LATE = 2, /**< Allocate on first write */
H5D_ALLOC_TIME_INCR = 3 /**< Allocate incrementally (by chunk) */
@@ -91,9 +91,9 @@ typedef enum H5D_alloc_time_t {
typedef enum H5D_space_status_t {
H5D_SPACE_STATUS_ERROR = -1, /**< Error */
H5D_SPACE_STATUS_NOT_ALLOCATED = 0, /**< Space has not been allocated for this dataset. */
- H5D_SPACE_STATUS_PART_ALLOCATED = 1, /**< Space has been allocated for this dataset. */
- H5D_SPACE_STATUS_ALLOCATED = 2 /**< Space has been partially allocated for this dataset. (Used only for
- datasets with chunked storage.) */
+ H5D_SPACE_STATUS_PART_ALLOCATED = 1, /**< Space has been partially allocated for this dataset.
+ (Used only for datasets with chunked storage.) */
+ H5D_SPACE_STATUS_ALLOCATED = 2 /**< Space has been allocated for this dataset. */
} H5D_space_status_t;
//! <!-- [H5D_space_status_t_snip] -->
@@ -127,8 +127,8 @@ typedef enum H5D_fill_value_t {
*/
typedef enum H5D_vds_view_t {
H5D_VDS_ERROR = -1, /**< Error */
- H5D_VDS_FIRST_MISSING = 0, /**< \todo Define this! */
- H5D_VDS_LAST_AVAILABLE = 1 /**< \todo Define this! */
+ H5D_VDS_FIRST_MISSING = 0, /**< Include all data before the first missing mapped data */
+ H5D_VDS_LAST_AVAILABLE = 1 /**< Include all available mapped data */
} H5D_vds_view_t;
//! <!-- [H5D_vds_view_t_snip] -->
@@ -682,8 +682,7 @@ H5_DLL herr_t H5Dget_chunk_info_by_coord(hid_t dset_id, const hsize_t *offset, u
* Iterate over all chunked datasets and chunks in a file.
* \snippet H5D_examples.c H5Ovisit_cb
*
- * \version 1.?.?
- * \todo When was this function introduced?
+ * \since 1.13.0
*
*/
H5_DLL herr_t H5Dchunk_iter(hid_t dset_id, hid_t dxpl_id, H5D_chunk_iter_op_t cb, void *op_data);
diff --git a/src/H5Dselect.c b/src/H5Dselect.c
index e64d657..f464ca5 100644
--- a/src/H5Dselect.c
+++ b/src/H5Dselect.c
@@ -105,6 +105,9 @@ H5D__select_io(const H5D_io_info_t *io_info, size_t elmt_size, size_t nelmts, H5
HDassert(io_info->store);
HDassert(io_info->u.rbuf);
+ if (elmt_size == 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "invalid elmt_size of 0")
+
/* Check for only one element in selection */
if (nelmts == 1) {
hsize_t single_mem_off; /* Offset in memory */
@@ -226,8 +229,6 @@ H5D__select_io(const H5D_io_info_t *io_info, size_t elmt_size, size_t nelmts, H5
/* Decrement number of elements left to process */
HDassert(((size_t)tmp_file_len % elmt_size) == 0);
- if (elmt_size == 0)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "Resulted in division by zero")
nelmts -= ((size_t)tmp_file_len / elmt_size);
} /* end while */
} /* end else */
@@ -257,6 +258,188 @@ done:
} /* end H5D__select_io() */
/*-------------------------------------------------------------------------
+ * Function: H5D_select_io_mem
+ *
+ * Purpose: Perform memory copies directly between two memory buffers
+ * according to the selections in the `dst_space` and
+ * `src_space` dataspaces.
+ *
+ * Note: This routine is [basically] the same as H5D__select_io,
+ * with the only difference being that the readvv/writevv
+ * calls are exchanged for H5VM_memcpyvv calls. Changes should
+ * be made to both routines.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D_select_io_mem(void *dst_buf, const H5S_t *dst_space, const void *src_buf, const H5S_t *src_space,
+ size_t elmt_size, size_t nelmts)
+{
+ H5S_sel_iter_t *dst_sel_iter = NULL; /* Destination dataspace iteration info */
+ H5S_sel_iter_t *src_sel_iter = NULL; /* Source dataspace iteration info */
+ hbool_t dst_sel_iter_init = FALSE; /* Destination dataspace selection iterator initialized? */
+ hbool_t src_sel_iter_init = FALSE; /* Source dataspace selection iterator initialized? */
+ hsize_t * dst_off = NULL; /* Pointer to sequence offsets in destination buffer */
+ hsize_t * src_off = NULL; /* Pointer to sequence offsets in source buffer */
+ size_t * dst_len = NULL; /* Pointer to sequence lengths in destination buffer */
+ size_t * src_len = NULL; /* Pointer to sequence lengths in source buffer */
+ size_t curr_dst_seq; /* Current destination buffer sequence to operate on */
+ size_t curr_src_seq; /* Current source buffer sequence to operate on */
+ size_t dst_nseq; /* Number of sequences generated for destination buffer */
+ size_t src_nseq; /* Number of sequences generated for source buffer */
+ size_t dxpl_vec_size; /* Vector length from API context's DXPL */
+ size_t vec_size; /* Vector length */
+ ssize_t bytes_copied;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(dst_buf);
+ HDassert(dst_space);
+ HDassert(src_buf);
+ HDassert(src_space);
+
+ if (elmt_size == 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "invalid elmt_size of 0")
+
+ /* Check for only one element in selection */
+ if (nelmts == 1) {
+ hsize_t single_dst_off; /* Offset in dst_space */
+ hsize_t single_src_off; /* Offset in src_space */
+ size_t single_dst_len; /* Length in dst_space */
+ size_t single_src_len; /* Length in src_space */
+
+ /* Get offset of first element in selections */
+ if (H5S_SELECT_OFFSET(dst_space, &single_dst_off) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve destination selection offset")
+ if (H5S_SELECT_OFFSET(src_space, &single_src_off) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve source selection offset")
+
+ /* Set up necessary information for I/O operation */
+ dst_nseq = src_nseq = 1;
+ curr_dst_seq = curr_src_seq = 0;
+ single_dst_off *= elmt_size;
+ single_src_off *= elmt_size;
+ single_dst_len = single_src_len = elmt_size;
+
+ /* Perform vectorized memcpy from src_buf to dst_buf */
+ if ((bytes_copied =
+ H5VM_memcpyvv(dst_buf, dst_nseq, &curr_dst_seq, &single_dst_len, &single_dst_off, src_buf,
+ src_nseq, &curr_src_seq, &single_src_len, &single_src_off)) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed")
+
+ HDassert(((size_t)bytes_copied % elmt_size) == 0);
+ }
+ else {
+ unsigned sel_iter_flags = H5S_SEL_ITER_GET_SEQ_LIST_SORTED | H5S_SEL_ITER_SHARE_WITH_DATASPACE;
+ size_t dst_nelem; /* Number of elements used in destination buffer sequences */
+ size_t src_nelem; /* Number of elements used in source buffer sequences */
+
+ /* Get info from API context */
+ if (H5CX_get_vec_size(&dxpl_vec_size) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_CANTGET, FAIL, "can't retrieve I/O vector size")
+
+ /* Allocate the vector I/O arrays */
+ if (dxpl_vec_size > H5D_IO_VECTOR_SIZE)
+ vec_size = dxpl_vec_size;
+ else
+ vec_size = H5D_IO_VECTOR_SIZE;
+
+ if (NULL == (dst_len = H5FL_SEQ_MALLOC(size_t, vec_size)))
+ HGOTO_ERROR(H5E_IO, H5E_CANTALLOC, FAIL, "can't allocate I/O length vector array")
+ if (NULL == (dst_off = H5FL_SEQ_MALLOC(hsize_t, vec_size)))
+ HGOTO_ERROR(H5E_IO, H5E_CANTALLOC, FAIL, "can't allocate I/O offset vector array")
+ if (NULL == (src_len = H5FL_SEQ_MALLOC(size_t, vec_size)))
+ HGOTO_ERROR(H5E_IO, H5E_CANTALLOC, FAIL, "can't allocate I/O length vector array")
+ if (NULL == (src_off = H5FL_SEQ_MALLOC(hsize_t, vec_size)))
+ HGOTO_ERROR(H5E_IO, H5E_CANTALLOC, FAIL, "can't allocate I/O offset vector array")
+
+ /* Allocate the dataspace selection iterators */
+ if (NULL == (dst_sel_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate destination selection iterator")
+ if (NULL == (src_sel_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate source selection iterator")
+
+ /* Initialize destination selection iterator */
+ if (H5S_select_iter_init(dst_sel_iter, dst_space, elmt_size, sel_iter_flags) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
+ dst_sel_iter_init = TRUE; /* Destination selection iteration info has been initialized */
+
+ /* Initialize source selection iterator */
+ if (H5S_select_iter_init(src_sel_iter, src_space, elmt_size, H5S_SEL_ITER_SHARE_WITH_DATASPACE) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTINIT, FAIL, "unable to initialize selection iterator")
+ src_sel_iter_init = TRUE; /* Source selection iteration info has been initialized */
+
+ /* Initialize sequence counts */
+ curr_dst_seq = curr_src_seq = 0;
+ dst_nseq = src_nseq = 0;
+
+ /* Loop, until all bytes are processed */
+ while (nelmts > 0) {
+ /* Check if more destination buffer sequences are needed */
+ if (curr_dst_seq >= dst_nseq) {
+ /* Get sequences for destination selection */
+ if (H5S_SELECT_ITER_GET_SEQ_LIST(dst_sel_iter, vec_size, nelmts, &dst_nseq, &dst_nelem,
+ dst_off, dst_len) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "sequence length generation failed")
+
+ /* Start at the beginning of the sequences again */
+ curr_dst_seq = 0;
+ }
+
+ /* Check if more source buffer sequences are needed */
+ if (curr_src_seq >= src_nseq) {
+ /* Get sequences for source selection */
+ if (H5S_SELECT_ITER_GET_SEQ_LIST(src_sel_iter, vec_size, nelmts, &src_nseq, &src_nelem,
+ src_off, src_len) < 0)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "sequence length generation failed")
+
+ /* Start at the beginning of the sequences again */
+ curr_src_seq = 0;
+ } /* end if */
+
+ /* Perform vectorized memcpy from src_buf to dst_buf */
+ if ((bytes_copied = H5VM_memcpyvv(dst_buf, dst_nseq, &curr_dst_seq, dst_len, dst_off, src_buf,
+ src_nseq, &curr_src_seq, src_len, src_off)) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed")
+
+ /* Decrement number of elements left to process */
+ HDassert(((size_t)bytes_copied % elmt_size) == 0);
+ nelmts -= ((size_t)bytes_copied / elmt_size);
+ }
+ }
+
+done:
+ /* Release selection iterators */
+ if (src_sel_iter) {
+ if (src_sel_iter_init && H5S_SELECT_ITER_RELEASE(src_sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
+
+ src_sel_iter = H5FL_FREE(H5S_sel_iter_t, src_sel_iter);
+ }
+ if (dst_sel_iter) {
+ if (dst_sel_iter_init && H5S_SELECT_ITER_RELEASE(dst_sel_iter) < 0)
+ HDONE_ERROR(H5E_DATASPACE, H5E_CANTRELEASE, FAIL, "unable to release selection iterator")
+
+ dst_sel_iter = H5FL_FREE(H5S_sel_iter_t, dst_sel_iter);
+ }
+
+ /* Release vector arrays, if allocated */
+ if (src_off)
+ src_off = H5FL_SEQ_FREE(hsize_t, src_off);
+ if (src_len)
+ src_len = H5FL_SEQ_FREE(size_t, src_len);
+ if (dst_off)
+ dst_off = H5FL_SEQ_FREE(hsize_t, dst_off);
+ if (dst_len)
+ dst_len = H5FL_SEQ_FREE(size_t, dst_len);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D_select_io_mem() */
+
+/*-------------------------------------------------------------------------
* Function: H5D__select_read
*
* Purpose: Reads directly from file into application memory.
diff --git a/src/H5EAdbg.c b/src/H5EAdbg.c
index b0e564c..b377422 100644
--- a/src/H5EAdbg.c
+++ b/src/H5EAdbg.c
@@ -237,7 +237,7 @@ H5EA__iblock_debug(H5F_t *f, haddr_t H5_ATTR_UNUSED addr, FILE *stream, int inde
HDfprintf(stream, "%*sData Block Addresses in Index Block:\n", indent, "");
for (u = 0; u < iblock->ndblk_addrs; u++) {
/* Print address */
- HDsprintf(temp_str, "Address #%u:", u);
+ HDsnprintf(temp_str, sizeof(temp_str), "Address #%u:", u);
HDfprintf(stream, "%*s%-*s %" PRIuHADDR "\n", (indent + 3), "", MAX(0, (fwidth - 3)), temp_str,
iblock->dblk_addrs[u]);
} /* end for */
@@ -252,7 +252,7 @@ H5EA__iblock_debug(H5F_t *f, haddr_t H5_ATTR_UNUSED addr, FILE *stream, int inde
HDfprintf(stream, "%*sSuper Block Addresses in Index Block:\n", indent, "");
for (u = 0; u < iblock->nsblk_addrs; u++) {
/* Print address */
- HDsprintf(temp_str, "Address #%u:", u);
+ HDsnprintf(temp_str, sizeof(temp_str), "Address #%u:", u);
HDfprintf(stream, "%*s%-*s %" PRIuHADDR "\n", (indent + 3), "", MAX(0, (fwidth - 3)), temp_str,
iblock->sblk_addrs[u]);
} /* end for */
@@ -341,7 +341,7 @@ H5EA__sblock_debug(H5F_t *f, haddr_t addr, FILE *stream, int indent, int fwidth,
HDfprintf(stream, "%*sData Block Addresses in Super Block:\n", indent, "");
for (u = 0; u < sblock->ndblks; u++) {
/* Print address */
- HDsprintf(temp_str, "Address #%u:", u);
+ HDsnprintf(temp_str, sizeof(temp_str), "Address #%u:", u);
HDfprintf(stream, "%*s%-*s %" PRIuHADDR "\n", (indent + 3), "", MAX(0, (fwidth - 3)), temp_str,
sblock->dblk_addrs[u]);
} /* end for */
diff --git a/src/H5EAprivate.h b/src/H5EAprivate.h
index 19dabd9..9481559f 100644
--- a/src/H5EAprivate.h
+++ b/src/H5EAprivate.h
@@ -26,11 +26,6 @@
#ifndef H5EAprivate_H
#define H5EAprivate_H
-/* Include package's public header */
-#ifdef NOT_YET
-#include "H5EApublic.h"
-#endif /* NOT_YET */
-
/* Private headers needed by this file */
#include "H5ACprivate.h" /* Metadata cache */
#include "H5Fprivate.h" /* File access */
diff --git a/src/H5EAtest.c b/src/H5EAtest.c
index 7924eaa..24efbc2 100644
--- a/src/H5EAtest.c
+++ b/src/H5EAtest.c
@@ -322,7 +322,7 @@ H5EA__test_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void *
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%llu:", (unsigned long long)idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%llu:", (unsigned long long)idx);
HDfprintf(stream, "%*s%-*s %llu\n", indent, "", fwidth, temp_str,
(unsigned long long)*(const uint64_t *)elmt);
diff --git a/src/H5ES.c b/src/H5ES.c
index ccc0dd8..ad42000 100644
--- a/src/H5ES.c
+++ b/src/H5ES.c
@@ -236,6 +236,61 @@ done:
} /* end H5ESget_op_counter() */
/*-------------------------------------------------------------------------
+ * Function: H5ESget_requests
+ *
+ * Purpose:     Retrieve the requests in an event set.  Up to array_len
+ *              requests are stored in the provided requests array, and
+ *              the connector ids corresponding to these requests are
+ *              stored in the provided connector_ids array.  Either or
+ *              both of these arrays may be NULL, in which case this
+ *              information is not returned.  If these arrays are
+ *              non-NULL, they must be large enough to contain array_len
+ *              entries.  On exit, *count is set to the total number of
+ *              events in the event set.
+ *
+ * Events are returned in the order they were added to the
+ * event set. If order is H5_ITER_INC or H5_ITER_NATIVE,
+ * events will be returned starting from the oldest. If order
+ * is H5_ITER_DEC, events will be returned starting with the
+ * newest/most recent.
+ *
+ * Return: SUCCEED / FAIL
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, November 23, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5ESget_requests(hid_t es_id, H5_iter_order_t order, hid_t *connector_ids, void **requests, size_t array_len,
+ size_t *count /*out*/)
+{
+ H5ES_t *es; /* Event set */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE6("e", "iIo*i**xzx", es_id, order, connector_ids, requests, array_len, count);
+
+ /* Check arguments */
+ if (NULL == (es = H5I_object_verify(es_id, H5I_EVENTSET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid event set identifier")
+ if (order <= H5_ITER_UNKNOWN || order >= H5_ITER_N)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid iteration order specified")
+
+ /* Call internal routine */
+ if (array_len > 0 && (requests || connector_ids))
+ if (H5ES__get_requests(es, order, connector_ids, requests, array_len) < 0)
+ HGOTO_ERROR(H5E_EVENTSET, H5E_CANTGET, FAIL, "can't get requests")
+
+ /* Retrieve the count, if non-NULL */
+ if (count)
+ *count = H5ES__list_count(&es->active);
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5ESget_requests() */
+
+/*-------------------------------------------------------------------------
* Function: H5ESwait
*
* Purpose: Wait (with timeout) for operations in event set to complete
diff --git a/src/H5ESdevelop.h b/src/H5ESdevelop.h
index 5a0f2b4..2fb9aeb 100644
--- a/src/H5ESdevelop.h
+++ b/src/H5ESdevelop.h
@@ -42,6 +42,8 @@ extern "C" {
#endif
H5_DLL herr_t H5ESinsert_request(hid_t es_id, hid_t connector_id, void *request);
+H5_DLL herr_t H5ESget_requests(hid_t es_id, H5_iter_order_t order, hid_t *connector_ids, void **requests,
+ size_t array_len, size_t *count);
#ifdef __cplusplus
}
diff --git a/src/H5ESint.c b/src/H5ESint.c
index c66be16..7eb5909 100644
--- a/src/H5ESint.c
+++ b/src/H5ESint.c
@@ -50,6 +50,14 @@
/* Local Typedefs */
/******************/
+/* Callback context for get events operations */
+typedef struct H5ES_get_requests_ctx_t {
+ hid_t *connector_ids; /* Output buffer for list of connector IDs that match the above requests */
+ void **requests; /* Output buffer for list of requests in event set */
+ size_t array_len; /* Length of the above output buffers */
+ size_t i; /* Number of elements filled in output buffers */
+} H5ES_get_requests_ctx_t;
+
/* Callback context for wait operations */
typedef struct H5ES_wait_ctx_t {
H5ES_t * es; /* Event set being operated on */
@@ -84,6 +92,7 @@ static herr_t H5ES__close(H5ES_t *es);
static herr_t H5ES__close_cb(void *es, void **request_token);
static herr_t H5ES__insert(H5ES_t *es, H5VL_t *connector, void *request_token, const char *app_file,
const char *app_func, unsigned app_line, const char *caller, const char *api_args);
+static int H5ES__get_requests_cb(H5ES_event_t *ev, void *_ctx);
static herr_t H5ES__handle_fail(H5ES_t *es, H5ES_event_t *ev);
static herr_t H5ES__op_complete(H5ES_t *es, H5ES_event_t *ev, H5VL_request_status_t ev_status);
static int H5ES__wait_cb(H5ES_event_t *ev, void *_ctx);
@@ -282,7 +291,8 @@ H5ES__insert(H5ES_t *es, H5VL_t *connector, void *request_token, const char *app
* there's no need to duplicate it.
*/
ev->op_info.api_name = caller;
- if (NULL == (ev->op_info.api_args = H5MM_xstrdup(api_args)))
+ HDassert(ev->op_info.api_args == NULL);
+ if (api_args && NULL == (ev->op_info.api_args = H5MM_xstrdup(api_args)))
HGOTO_ERROR(H5E_EVENTSET, H5E_CANTALLOC, FAIL, "can't copy API routine arguments")
/* Append fully initialized event onto the event set's 'active' list */
@@ -419,6 +429,86 @@ done:
} /* end H5ES__insert_request() */
/*-------------------------------------------------------------------------
+ * Function: H5ES__get_requests_cb
+ *
+ * Purpose:     Iterator callback for H5ES__get_requests - adds the event's
+ *              request (and connector ID) to the output arrays.
+ *
+ * Return: SUCCEED / FAIL
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, November 23, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5ES__get_requests_cb(H5ES_event_t *ev, void *_ctx)
+{
+ H5ES_get_requests_ctx_t *ctx = (H5ES_get_requests_ctx_t *)_ctx; /* Callback context */
+ int ret_value = H5_ITER_CONT; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(ev);
+ HDassert(ctx);
+ HDassert(ctx->i < ctx->array_len);
+
+ /* Get the connector ID for the event */
+ if (ctx->connector_ids)
+ ctx->connector_ids[ctx->i] = ev->request->connector->id;
+
+ /* Get the request for the event */
+ if (ctx->requests)
+ ctx->requests[ctx->i] = ev->request->data;
+
+ /* Check if we've run out of room in the arrays */
+ if (++ctx->i == ctx->array_len)
+ ret_value = H5_ITER_STOP;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5ES__get_requests_cb() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5ES__get_requests
+ *
+ * Purpose: Get all requests in an event set.
+ *
+ * Return: SUCCEED / FAIL
+ *
+ * Programmer: Neil Fortner
+ * Tuesday, November 23, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5ES__get_requests(H5ES_t *es, H5_iter_order_t order, hid_t *connector_ids, void **requests, size_t array_len)
+{
+ H5ES_get_requests_ctx_t ctx; /* Callback context */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(es);
+ HDassert(array_len > 0);
+ HDassert(requests || connector_ids);
+
+ /* Set up context for iterator callbacks */
+ ctx.connector_ids = connector_ids;
+ ctx.requests = requests;
+ ctx.array_len = array_len;
+ ctx.i = 0;
+
+ /* Iterate over the events in the set */
+ if (H5ES__list_iterate(&es->active, order, H5ES__get_requests_cb, &ctx) < 0)
+ HGOTO_ERROR(H5E_EVENTSET, H5E_BADITER, FAIL, "iteration failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5ES__get_requests() */
+
+/*-------------------------------------------------------------------------
* Function: H5ES__handle_fail
*
* Purpose: Handle a failed event
@@ -661,7 +751,7 @@ H5ES__wait(H5ES_t *es, uint64_t timeout, size_t *num_in_progress, hbool_t *op_fa
ctx.op_failed = op_failed;
/* Iterate over the events in the set, waiting for them to complete */
- if (H5ES__list_iterate(&es->active, H5ES__wait_cb, &ctx) < 0)
+ if (H5ES__list_iterate(&es->active, H5_ITER_NATIVE, H5ES__wait_cb, &ctx) < 0)
HGOTO_ERROR(H5E_EVENTSET, H5E_BADITER, FAIL, "iteration failed")
done:
@@ -769,7 +859,7 @@ H5ES__cancel(H5ES_t *es, size_t *num_not_canceled, hbool_t *op_failed)
ctx.op_failed = op_failed;
/* Iterate over the events in the set, attempting to cancel them */
- if (H5ES__list_iterate(&es->active, H5ES__cancel_cb, &ctx) < 0)
+ if (H5ES__list_iterate(&es->active, H5_ITER_NATIVE, H5ES__cancel_cb, &ctx) < 0)
HGOTO_ERROR(H5E_EVENTSET, H5E_BADITER, FAIL, "iteration failed")
done:
@@ -806,13 +896,13 @@ H5ES__get_err_info_cb(H5ES_event_t *ev, void *_ctx)
* so there's no need to duplicate them internally, but they are duplicated
* here, when they are given back to the user.
*/
- if (NULL == (ctx->curr_err_info->api_name = H5MM_strdup(ev->op_info.api_name)))
+ if (NULL == (ctx->curr_err_info->api_name = H5MM_xstrdup(ev->op_info.api_name)))
HGOTO_ERROR(H5E_EVENTSET, H5E_CANTALLOC, H5_ITER_ERROR, "can't copy HDF5 API routine name")
- if (NULL == (ctx->curr_err_info->api_args = H5MM_strdup(ev->op_info.api_args)))
+ if (NULL == (ctx->curr_err_info->api_args = H5MM_xstrdup(ev->op_info.api_args)))
HGOTO_ERROR(H5E_EVENTSET, H5E_CANTALLOC, H5_ITER_ERROR, "can't copy HDF5 API routine arguments")
- if (NULL == (ctx->curr_err_info->app_file_name = H5MM_strdup(ev->op_info.app_file_name)))
+ if (NULL == (ctx->curr_err_info->app_file_name = H5MM_xstrdup(ev->op_info.app_file_name)))
HGOTO_ERROR(H5E_EVENTSET, H5E_CANTALLOC, H5_ITER_ERROR, "can't copy HDF5 application file name")
- if (NULL == (ctx->curr_err_info->app_func_name = H5MM_strdup(ev->op_info.app_func_name)))
+ if (NULL == (ctx->curr_err_info->app_func_name = H5MM_xstrdup(ev->op_info.app_func_name)))
HGOTO_ERROR(H5E_EVENTSET, H5E_CANTALLOC, H5_ITER_ERROR, "can't copy HDF5 application function name")
ctx->curr_err_info->app_line_num = ev->op_info.app_line_num;
ctx->curr_err_info->op_ins_count = ev->op_info.op_ins_count;
@@ -883,7 +973,7 @@ H5ES__get_err_info(H5ES_t *es, size_t num_err_info, H5ES_err_info_t err_info[],
ctx.curr_err_info = &err_info[0];
/* Iterate over the failed events in the set, copying their error info */
- if (H5ES__list_iterate(&es->failed, H5ES__get_err_info_cb, &ctx) < 0)
+ if (H5ES__list_iterate(&es->failed, H5_ITER_NATIVE, H5ES__get_err_info_cb, &ctx) < 0)
HGOTO_ERROR(H5E_EVENTSET, H5E_BADITER, FAIL, "iteration failed")
/* Set # of failed events cleared from event set's failed list */
@@ -957,7 +1047,7 @@ H5ES__close(H5ES_t *es)
"can't close event set while unfinished operations are present (i.e. wait on event set first)")
/* Iterate over the failed events in the set, releasing them */
- if (H5ES__list_iterate(&es->failed, H5ES__close_failed_cb, (void *)es) < 0)
+ if (H5ES__list_iterate(&es->failed, H5_ITER_NATIVE, H5ES__close_failed_cb, (void *)es) < 0)
HGOTO_ERROR(H5E_EVENTSET, H5E_BADITER, FAIL, "iteration failed")
/* Release the event set */
diff --git a/src/H5ESlist.c b/src/H5ESlist.c
index 3180322..61a9dd1 100644
--- a/src/H5ESlist.c
+++ b/src/H5ESlist.c
@@ -135,7 +135,10 @@ H5ES__list_count(const H5ES_event_list_t *el)
* each event.
*
* Note: Iteration is safe for deleting the current event. Modifying
- * the list in other ways is likely unsafe.
+ * the list in other ways is likely unsafe. If order is
+ * H5_ITER_INC or H5_ITER_NATIVE events are visited starting
+ * with the oldest, otherwise they are visited starting with
+ * the newest.
*
* Return: SUCCEED / FAIL
*
@@ -145,7 +148,7 @@ H5ES__list_count(const H5ES_event_list_t *el)
*-------------------------------------------------------------------------
*/
int
-H5ES__list_iterate(H5ES_event_list_t *el, H5ES_list_iter_func_t cb, void *ctx)
+H5ES__list_iterate(H5ES_event_list_t *el, H5_iter_order_t order, H5ES_list_iter_func_t cb, void *ctx)
{
H5ES_event_t *ev; /* Event in list */
int ret_value = H5_ITER_CONT; /* Return value */
@@ -157,12 +160,12 @@ H5ES__list_iterate(H5ES_event_list_t *el, H5ES_list_iter_func_t cb, void *ctx)
HDassert(cb);
/* Iterate over events in list */
- ev = el->head;
+ ev = (order == H5_ITER_DEC) ? el->tail : el->head;
while (ev) {
H5ES_event_t *tmp; /* Temporary event */
/* Get pointer to next node, so it's safe if this one is removed */
- tmp = ev->next;
+ tmp = (order == H5_ITER_DEC) ? ev->prev : ev->next;
/* Perform iterator callback */
if ((ret_value = (*cb)(ev, ctx)) != H5_ITER_CONT) {
diff --git a/src/H5ESpkg.h b/src/H5ESpkg.h
index a7a8e20..6ee50fa 100644
--- a/src/H5ESpkg.h
+++ b/src/H5ESpkg.h
@@ -81,6 +81,8 @@ typedef int (*H5ES_list_iter_func_t)(H5ES_event_t *ev, void *ctx);
H5_DLL H5ES_t *H5ES__create(void);
H5_DLL herr_t H5ES__insert_request(H5ES_t *es, H5VL_t *connector, void *token);
H5_DLL herr_t H5ES__wait(H5ES_t *es, uint64_t timeout, size_t *num_in_progress, hbool_t *op_failed);
+H5_DLL herr_t H5ES__get_requests(H5ES_t *es, H5_iter_order_t order, hid_t *connector_ids, void **requests,
+ size_t array_len);
H5_DLL herr_t H5ES__cancel(H5ES_t *es, size_t *num_not_canceled, hbool_t *op_failed);
H5_DLL herr_t H5ES__get_err_info(H5ES_t *es, size_t num_err_info, H5ES_err_info_t err_info[],
size_t *num_cleared);
@@ -88,7 +90,8 @@ H5_DLL herr_t H5ES__get_err_info(H5ES_t *es, size_t num_err_info, H5ES_err_info
/* Event list operations */
H5_DLL void H5ES__list_append(H5ES_event_list_t *el, H5ES_event_t *ev);
H5_DLL size_t H5ES__list_count(const H5ES_event_list_t *el);
-H5_DLL int H5ES__list_iterate(H5ES_event_list_t *el, H5ES_list_iter_func_t cb, void *ctx);
+H5_DLL int H5ES__list_iterate(H5ES_event_list_t *el, H5_iter_order_t order, H5ES_list_iter_func_t cb,
+ void *ctx);
H5_DLL void H5ES__list_remove(H5ES_event_list_t *el, const H5ES_event_t *ev);
/* Event operations */
diff --git a/src/H5ESpublic.h b/src/H5ESpublic.h
index c8696b3..c8d1c7b 100644
--- a/src/H5ESpublic.h
+++ b/src/H5ESpublic.h
@@ -200,7 +200,18 @@ H5_DLL herr_t H5ESget_count(hid_t es_id, size_t *count);
/**
* \ingroup H5ES
*
- * \todo Fill in the blanks!
+ * \brief Retrieves the next operation counter to be assigned in an event set
+ *
+ * \es_id
+ * \param[out] counter The next counter value to be assigned to an event
+ * \returns \herr_t
+ *
+ * \details H5ESget_op_counter() retrieves the \p counter that will be assigned
+ * to the next operation inserted into the event set \p es_id.
+ *
+ * \note This is designed for wrapper libraries mainly, to use as a mechanism
+ * for matching operations inserted into the event set with possible
+ * errors that occur.
*
* \since 1.13.0
*
diff --git a/src/H5FAdblkpage.c b/src/H5FAdblkpage.c
index 713bd67..f6a5aef 100644
--- a/src/H5FAdblkpage.c
+++ b/src/H5FAdblkpage.c
@@ -147,7 +147,7 @@ H5FA__dblk_page_create(H5FA_hdr_t *hdr, haddr_t addr, size_t nelmts)
FUNC_ENTER_PACKAGE
#ifdef H5FA_DEBUG
- HDfprintf(stderr, "%s: Called, addr = %a\n", __func__, addr);
+ HDfprintf(stderr, "%s: Called, addr = %" PRIuHADDR "\n", __func__, addr);
#endif /* H5FA_DEBUG */
/* Sanity check */
diff --git a/src/H5FAprivate.h b/src/H5FAprivate.h
index 26057bf..745c129 100644
--- a/src/H5FAprivate.h
+++ b/src/H5FAprivate.h
@@ -24,11 +24,6 @@
#ifndef H5FAprivate_H
#define H5FAprivate_H
-/* Include package's public header */
-#ifdef NOT_YET
-#include "H5FApublic.h"
-#endif /* NOT_YET */
-
/* Private headers needed by this file */
#include "H5ACprivate.h" /* Metadata cache */
#include "H5Fprivate.h" /* File access */
@@ -134,7 +129,7 @@ H5_DLL herr_t H5FA_patch_file(H5FA_t *fa, H5F_t *f);
H5_DLL herr_t H5FA_get_stats(const H5FA_t *ea, H5FA_stat_t *stats);
/* Debugging routines */
-#ifdef H5FA_DEBUGGING
-#endif /* H5FA_DEBUGGING */
+#ifdef H5FA_DEBUG
+#endif /* H5FA_DEBUG */
#endif /* H5FAprivate_H */
diff --git a/src/H5FAtest.c b/src/H5FAtest.c
index 384a657..b57f562 100644
--- a/src/H5FAtest.c
+++ b/src/H5FAtest.c
@@ -303,7 +303,7 @@ H5FA__test_debug(FILE *stream, int indent, int fwidth, hsize_t idx, const void *
HDassert(elmt);
/* Print element */
- HDsprintf(temp_str, "Element #%llu:", (unsigned long long)idx);
+ HDsnprintf(temp_str, sizeof(temp_str), "Element #%llu:", (unsigned long long)idx);
HDfprintf(stream, "%*s%-*s %llu\n", indent, "", fwidth, temp_str,
(unsigned long long)*(const uint64_t *)elmt);
diff --git a/src/H5FD.c b/src/H5FD.c
index 397da34..1887e18 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -214,6 +214,8 @@ H5FDregister(const H5FD_class_t *cls)
/* Check arguments */
if (!cls)
HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, H5I_INVALID_HID, "null class pointer is disallowed")
+ if (cls->version != H5FD_CLASS_VERSION)
+ HGOTO_ERROR(H5E_ARGS, H5E_VERSION, H5I_INVALID_HID, "wrong file driver version #")
if (!cls->open || !cls->close)
HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, H5I_INVALID_HID,
"'open' and/or 'close' methods are not defined")
@@ -928,9 +930,10 @@ H5FD_cmp(const H5FD_t *f1, const H5FD_t *f2)
{
int ret_value = -1; /* Return value */
- FUNC_ENTER_NOAPI_NOERR /* return value is arbitrary */
+ FUNC_ENTER_NOAPI_NOERR; /* return value is arbitrary */
- if ((!f1 || !f1->cls) && (!f2 || !f2->cls)) HGOTO_DONE(0)
+ if ((!f1 || !f1->cls) && (!f2 || !f2->cls))
+ HGOTO_DONE(0)
if (!f1 || !f1->cls)
HGOTO_DONE(-1)
if (!f2 || !f2->cls)
@@ -1479,6 +1482,370 @@ done:
} /* end H5FDwrite() */
/*-------------------------------------------------------------------------
+ * Function: H5FDread_vector
+ *
+ * Purpose: Perform count reads from the specified file at the offsets
+ * provided in the addrs array, with the lengths and memory
+ * types provided in the sizes and types arrays. Data read
+ * is returned in the buffers provided in the bufs array.
+ *
+ * All reads are done according to the data transfer property
+ * list dxpl_id (which may be the constant H5P_DEFAULT).
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ *                          the results have been read into the
+ *                          supplied buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ *
+ * Programmer: JRM -- 6/10/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FDread_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], void *bufs[] /* out */)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE7("e", "*#iIu*Mt*a*zx", file, dxpl_id, count, types, addrs, sizes, bufs);
+
+ /* Check arguments */
+ if (!file)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL")
+
+ if (!file->cls)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL")
+
+ if ((!types) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "types parameter can't be NULL if count is positive")
+
+ if ((!addrs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "addrs parameter can't be NULL if count is positive")
+
+ if ((!sizes) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes parameter can't be NULL if count is positive")
+
+ if ((!bufs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive")
+
+ if ((count > 0) && (sizes[0] == 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0")
+
+ if ((count > 0) && (types[0] == H5FD_MEM_NOLIST))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "count[0] can't be H5FD_MEM_NOLIST")
+
+ /* Get the default dataset transfer property list if the user
+ * didn't provide one
+ */
+ if (H5P_DEFAULT == dxpl_id) {
+ dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ }
+ else {
+ if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
+ }
+
+ /* Set DXPL for operation */
+ H5CX_set_dxpl(dxpl_id);
+
+ /* Call private function */
+ /* (Note compensating for base addresses addition in internal routine) */
+ if (H5FD_read_vector(file, count, types, addrs, sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file vector read request failed")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5FDread_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FDwrite_vector
+ *
+ * Purpose: Perform count writes to the specified file at the offsets
+ * provided in the addrs array, with the lengths and memory
+ * types provided in the sizes and types arrays. Data to be
+ * written is in the buffers provided in the bufs array.
+ *
+ * All writes are done according to the data transfer property
+ * list dxpl_id (which may be the constant H5P_DEFAULT).
+ *
+ * Return: Success: SUCCEED
+ * All writes have completed successfully
+ *
+ * Failure: FAIL
+ * One or more of the writes failed.
+ *
+ * Programmer: JRM -- 6/10/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FDwrite_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], const void *bufs[] /* in */)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE7("e", "*#iIu*Mt*a*z**x", file, dxpl_id, count, types, addrs, sizes, bufs);
+
+ /* Check arguments */
+ if (!file)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL")
+
+ if (!file->cls)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL")
+
+ if ((!types) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "types parameter can't be NULL if count is positive")
+
+ if ((!addrs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "addrs parameter can't be NULL if count is positive")
+
+ if ((!sizes) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes parameter can't be NULL if count is positive")
+
+ if ((!bufs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive")
+
+ if ((count > 0) && (sizes[0] == 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0")
+
+ if ((count > 0) && (types[0] == H5FD_MEM_NOLIST))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "count[0] can't be H5FD_MEM_NOLIST")
+
+ /* Get the default dataset transfer property list if the user didn't provide one */
+ if (H5P_DEFAULT == dxpl_id) {
+ dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ }
+ else {
+ if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
+ }
+
+ /* Set DXPL for operation */
+ H5CX_set_dxpl(dxpl_id);
+
+ /* Call private function */
+ /* (Note compensating for base address addition in internal routine) */
+ if (H5FD_write_vector(file, count, types, addrs, sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file vector write request failed")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5FDwrite_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FDread_selection
+ *
+ * Purpose: Perform count reads from the specified file at the
+ * locations selected in the dataspaces in the file_spaces
+ * array, with each of those dataspaces starting at the file
+ * address specified by the corresponding element of the
+ * offsets array, and with the size of each element in the
+ * dataspace specified by the corresponding element of the
+ * element_sizes array. The memory type provided by type is
+ * the same for all selections. Data read is returned in
+ * the locations selected in the dataspaces in the
+ * mem_spaces array, within the buffers provided in the
+ * corresponding elements of the bufs array.
+ *
+ * If i > 0 and element_sizes[i] == 0, presume
+ * element_sizes[n] = element_sizes[i-1] for all n >= i and
+ * < count.
+ *
+ * If the underlying VFD supports selection reads, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the selection read into a sequence
+ * of individual reads.
+ *
+ * All reads are done according to the data transfer property
+ * list dxpl_id (which may be the constant H5P_DEFAULT).
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ *                          the results have been read into the
+ *                          supplied buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ *
+ * Programmer: NAF -- 5/19/21
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FDread_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, hid_t mem_space_ids[],
+ hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE9("e", "*#MtiIu*i*i*a*zx", file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+ element_sizes, bufs);
+
+ /* Check arguments */
+ if (!file)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL")
+
+ if (!file->cls)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL")
+
+ if ((!mem_space_ids) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mem_spaces parameter can't be NULL if count is positive")
+
+ if ((!file_space_ids) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_spaces parameter can't be NULL if count is positive")
+
+ if ((!offsets) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offsets parameter can't be NULL if count is positive")
+
+ if ((!element_sizes) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "element_sizes parameter can't be NULL if count is positive")
+
+ if ((!bufs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive")
+
+ if ((count > 0) && (element_sizes[0] == 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0")
+
+ if ((count > 0) && (bufs[0] == NULL))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs[0] can't be NULL")
+
+ /* Get the default dataset transfer property list if the user didn't provide one */
+ if (H5P_DEFAULT == dxpl_id) {
+ dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ }
+ else {
+ if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
+ }
+
+ /* Set DXPL for operation */
+ H5CX_set_dxpl(dxpl_id);
+
+ /* Call private function */
+ /* (Note compensating for base address addition in internal routine) */
+ if (H5FD_read_selection_id(file, type, count, mem_space_ids, file_space_ids, offsets, element_sizes,
+ bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file selection read request failed")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5FDread_selection() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FDwrite_selection
+ *
+ * Purpose: Perform count writes to the specified file at the
+ * locations selected in the dataspaces in the file_spaces
+ * array, with each of those dataspaces starting at the file
+ * address specified by the corresponding element of the
+ * offsets array, and with the size of each element in the
+ * dataspace specified by the corresponding element of the
+ * element_sizes array. The memory type provided by type is
+ *              the same for all selections. Data to be written is in
+ * the locations selected in the dataspaces in the
+ * mem_spaces array, within the buffers provided in the
+ * corresponding elements of the bufs array.
+ *
+ * If i > 0 and element_sizes[i] == 0, presume
+ * element_sizes[n] = element_sizes[i-1] for all n >= i and
+ * < count.
+ *
+ * If the underlying VFD supports selection writes, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the selection write into a sequence
+ * of individual writes.
+ *
+ * All writes are done according to the data transfer property
+ * list dxpl_id (which may be the constant H5P_DEFAULT).
+ *
+ * Return: Success: SUCCEED
+ * All writes have completed successfully
+ *
+ * Failure: FAIL
+ * One or more of the writes failed.
+ *
+ * Programmer: NAF -- 5/14/21
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FDwrite_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, hid_t mem_space_ids[],
+ hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], const void *bufs[])
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE9("e", "*#MtiIu*i*i*a*z**x", file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+ element_sizes, bufs);
+
+ /* Check arguments */
+ if (!file)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL")
+
+ if (!file->cls)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL")
+
+ if ((!mem_space_ids) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mem_spaces parameter can't be NULL if count is positive")
+
+ if ((!file_space_ids) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_spaces parameter can't be NULL if count is positive")
+
+ if ((!offsets) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offsets parameter can't be NULL if count is positive")
+
+ if ((!element_sizes) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
+ "element_sizes parameter can't be NULL if count is positive")
+
+ if ((!bufs) && (count > 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive")
+
+ if ((count > 0) && (element_sizes[0] == 0))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0")
+
+ if ((count > 0) && (bufs[0] == NULL))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs[0] can't be NULL")
+
+ /* Get the default dataset transfer property list if the user didn't provide one */
+ if (H5P_DEFAULT == dxpl_id) {
+ dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ }
+ else {
+ if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
+ }
+
+ /* Set DXPL for operation */
+ H5CX_set_dxpl(dxpl_id);
+
+ /* Call private function */
+ /* (Note compensating for base address addition in internal routine) */
+ if (H5FD_write_selection_id(file, type, count, mem_space_ids, file_space_ids, offsets, element_sizes,
+ bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file selection write request failed")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5FDwrite_selection() */
+
+/*-------------------------------------------------------------------------
* Function: H5FDflush
*
* Purpose: Notify driver to flush all cached data. If the driver has no
@@ -1826,7 +2193,7 @@ H5FD_ctl(H5FD_t *file, uint64_t op_code, uint64_t flags, const void *input, void
else if (flags & H5FD_CTL__FAIL_IF_UNKNOWN_FLAG) {
HGOTO_ERROR(H5E_VFL, H5E_FCNTL, FAIL,
- "VFD ctl request failed (no ctl callback and fail if unknown flag is set)")
+ "VFD ctl request failed (no ctl and fail if unknown flag is set)")
}
done:
diff --git a/src/H5FDcore.c b/src/H5FDcore.c
index a1750ee..0604316 100644
--- a/src/H5FDcore.c
+++ b/src/H5FDcore.c
@@ -152,6 +152,7 @@ static herr_t H5FD__core_delete(const char *filename, hid_t fapl_id);
static inline const H5FD_core_fapl_t *H5FD__core_get_default_config(void);
static const H5FD_class_t H5FD_core_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_CORE_VALUE, /* value */
"core", /* name */
MAXADDR, /* maxaddr */
@@ -180,6 +181,10 @@ static const H5FD_class_t H5FD_core_g = {
H5FD__core_get_handle, /* get_handle */
H5FD__core_read, /* read */
H5FD__core_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
H5FD__core_flush, /* flush */
H5FD__core_truncate, /* truncate */
H5FD__core_lock, /* lock */
diff --git a/src/H5FDdevelop.h b/src/H5FDdevelop.h
index 938f7f6..f5b32ed 100644
--- a/src/H5FDdevelop.h
+++ b/src/H5FDdevelop.h
@@ -25,6 +25,9 @@
/* Public Macros */
/*****************/
+/* H5FD_class_t struct version */
+#define H5FD_CLASS_VERSION 0x01 /* File driver struct version */
+
/* Map "fractal heap" header blocks to 'ohdr' type file memory, since its
* a fair amount of work to add a new kind of file memory and they are similar
* enough to object headers and probably too minor to deserve their own type.
@@ -160,6 +163,7 @@ typedef struct H5FD_t H5FD_t;
/* Class information for each file driver */
typedef struct H5FD_class_t {
+ unsigned version; /**< File driver class struct version # */
H5FD_class_value_t value;
const char * name;
haddr_t maxaddr;
@@ -188,6 +192,16 @@ typedef struct H5FD_class_t {
herr_t (*get_handle)(H5FD_t *file, hid_t fapl, void **file_handle);
herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, void *buffer);
herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, const void *buffer);
+ herr_t (*read_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], void *bufs[]);
+ herr_t (*write_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], const void *bufs[]);
+ herr_t (*read_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[],
+ hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[],
+ void *bufs[] /*out*/);
+ herr_t (*write_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[],
+ hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[],
+ const void *bufs[] /*in*/);
herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
herr_t (*lock)(H5FD_t *file, hbool_t rw);
@@ -223,6 +237,9 @@ struct H5FD_t {
hbool_t paged_aggr; /* Paged aggregation for file space is enabled or not */
};
+/* VFD initialization function */
+typedef hid_t (*H5FD_init_t)(void);
+
/********************/
/* Public Variables */
/********************/
@@ -235,7 +252,7 @@ struct H5FD_t {
extern "C" {
#endif
-H5_DLL hid_t H5FDperform_init(hid_t (*)(void));
+H5_DLL hid_t H5FDperform_init(H5FD_init_t op);
H5_DLL hid_t H5FDregister(const H5FD_class_t *cls);
H5_DLL htri_t H5FDis_driver_registered_by_name(const char *driver_name);
H5_DLL htri_t H5FDis_driver_registered_by_value(H5FD_class_value_t driver_value);
@@ -254,6 +271,16 @@ H5_DLL herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t ad
void *buf /*out*/);
H5_DLL herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
const void *buf);
+H5_DLL herr_t H5FDread_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[],
+ haddr_t addrs[], size_t sizes[], void *bufs[] /* out */);
+H5_DLL herr_t H5FDwrite_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[],
+ haddr_t addrs[], size_t sizes[], const void *bufs[] /* in */);
+H5_DLL herr_t H5FDread_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+ hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[],
+ size_t element_sizes[], void *bufs[] /* out */);
+H5_DLL herr_t H5FDwrite_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+ hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[],
+ size_t element_sizes[], const void *bufs[]);
H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FDlock(H5FD_t *file, hbool_t rw);
diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c
index d07d909..25ee970 100644
--- a/src/H5FDdirect.c
+++ b/src/H5FDdirect.c
@@ -142,6 +142,7 @@ static herr_t H5FD__direct_unlock(H5FD_t *_file);
static herr_t H5FD__direct_delete(const char *filename, hid_t fapl_id);
static const H5FD_class_t H5FD_direct_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_DIRECT_VALUE, /* value */
"direct", /* name */
MAXADDR, /* maxaddr */
@@ -170,6 +171,10 @@ static const H5FD_class_t H5FD_direct_g = {
H5FD__direct_get_handle, /* get_handle */
H5FD__direct_read, /* read */
H5FD__direct_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
H5FD__direct_truncate, /* truncate */
H5FD__direct_lock, /* lock */
@@ -213,8 +218,11 @@ H5FD_direct_init(void)
else
ignore_disabled_file_locks_s = FAIL; /* Environment variable not set, or not set correctly */
- if (H5I_VFL != H5I_get_type(H5FD_DIRECT_g))
+ if (H5I_VFL != H5I_get_type(H5FD_DIRECT_g)) {
H5FD_DIRECT_g = H5FD_register(&H5FD_direct_g, sizeof(H5FD_class_t), FALSE);
+ if (H5I_INVALID_HID == H5FD_DIRECT_g)
+ HGOTO_ERROR(H5E_ID, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register direct");
+ }
/* Set return value */
ret_value = H5FD_DIRECT_g;
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index 4e54197..66a1a68 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -112,6 +112,7 @@ static herr_t H5FD__family_delete(const char *filename, hid_t fapl_id);
/* The class struct */
static const H5FD_class_t H5FD_family_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_FAMILY_VALUE, /* value */
"family", /* name */
HADDR_MAX, /* maxaddr */
@@ -140,6 +141,10 @@ static const H5FD_class_t H5FD_family_g = {
H5FD__family_get_handle, /* get_handle */
H5FD__family_read, /* read */
H5FD__family_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
H5FD__family_flush, /* flush */
H5FD__family_truncate, /* truncate */
H5FD__family_lock, /* lock */
@@ -233,20 +238,20 @@ H5FD__family_get_default_printf_filename(const char *old_filename)
HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, NULL, "can't allocate new filename buffer")
/* Determine if filename contains a ".h5" extension. */
- if ((file_extension = strstr(old_filename, ".h5"))) {
+ if ((file_extension = HDstrstr(old_filename, ".h5"))) {
/* Insert the printf format between the filename and ".h5" extension. */
HDstrcpy(tmp_buffer, old_filename);
- file_extension = strstr(tmp_buffer, ".h5");
+ file_extension = HDstrstr(tmp_buffer, ".h5");
HDsprintf(file_extension, "%s%s", suffix, ".h5");
}
- else if ((file_extension = strrchr(old_filename, '.'))) {
+ else if ((file_extension = HDstrrchr(old_filename, '.'))) {
char *new_extension_loc = NULL;
/* If the filename doesn't contain a ".h5" extension, but contains
* AN extension, just insert the printf format before that extension.
*/
HDstrcpy(tmp_buffer, old_filename);
- new_extension_loc = strrchr(tmp_buffer, '.');
+ new_extension_loc = HDstrrchr(tmp_buffer, '.');
HDsprintf(new_extension_loc, "%s%s", suffix, file_extension);
}
else {
diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c
index 4927a40..f0ffb62 100644
--- a/src/H5FDhdfs.c
+++ b/src/H5FDhdfs.c
@@ -278,6 +278,7 @@ static herr_t H5FD__hdfs_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing
static herr_t H5FD__hdfs_validate_config(const H5FD_hdfs_fapl_t *fa);
static const H5FD_class_t H5FD_hdfs_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_HDFS_VALUE, /* value */
"hdfs", /* name */
MAXADDR, /* maxaddr */
@@ -306,6 +307,10 @@ static const H5FD_class_t H5FD_hdfs_g = {
H5FD__hdfs_get_handle, /* get_handle */
H5FD__hdfs_read, /* read */
H5FD__hdfs_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
H5FD__hdfs_truncate, /* truncate */
NULL, /* lock */
diff --git a/src/H5FDint.c b/src/H5FDint.c
index d7fe33c..0c3fe9e 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -34,6 +34,7 @@
#include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File access */
#include "H5FDpkg.h" /* File Drivers */
+#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */
#include "H5PLprivate.h" /* Plugins */
@@ -41,10 +42,51 @@
/* Local Macros */
/****************/
+/* Length of sequence lists requested from dataspace selections */
+#define H5FD_SEQ_LIST_LEN 128
+
+/* Length of stack allocated arrays for building vector I/O operations.
+ * Corresponds to the number of contiguous blocks in a selection I/O operation.
+ * If more space is needed dynamic allocation will be used instead. */
+#define H5FD_LOCAL_VECTOR_LEN 8
+
+/* Length of stack allocated arrays for dataspace IDs/structs for selection I/O
+ * operations. Corresponds to the number of file selection/memory selection
+ * pairs (along with addresses, etc.) in a selection I/O operation. If more
+ * space is needed dynamic allocation will be used instead */
+#define H5FD_LOCAL_SEL_ARR_LEN 8
+
/******************/
/* Local Typedefs */
/******************/
+/*************************************************************************
+ *
+ * H5FD_vsrt_tmp_t
+ *
+ * Structure used to store vector I/O request addresses and the associated
+ * indexes in the addrs[] array for the purpose of determining the sorted
+ * order.
+ *
+ * This is done by allocating an array of H5FD_vsrt_tmp_t of length
+ * count, loading it with the contents of the addrs[] array and the
+ * associated indices, and then sorting it.
+ *
+ * This sorted array of H5FD_vsrt_tmp_t is then used to populate sorted
+ * versions of the types[], addrs[], sizes[] and bufs[] vectors.
+ *
+ * addr: haddr_t containing the value of addrs[i],
+ *
+ * index: integer containing the value of i used to obtain the
+ * value of the addr field from the addrs[] vector.
+ *
+ *************************************************************************/
+
+typedef struct H5FD_vsrt_tmp_t {
+ haddr_t addr;
+ int index;
+} H5FD_vsrt_tmp_t;
+
/* Information needed for iterating over the registered VFD hid_t IDs.
* The name or value of the new VFD that is being registered is stored
* in the name (or value) field and the found_id field is initialized to
@@ -66,7 +108,13 @@ typedef struct H5FD_get_driver_ud_t {
/********************/
/* Local Prototypes */
/********************/
-static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data);
+static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data);
+static herr_t H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+ H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[],
+ size_t element_sizes[], void *bufs[] /* out */);
+static herr_t H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+ H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[],
+ size_t element_sizes[], const void *bufs[]);
/*********************/
/* Package Variables */
@@ -80,6 +128,9 @@ static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data);
/* Local Variables */
/*******************/
+/* Declare extern free list to manage the H5S_sel_iter_t struct */
+H5FL_EXTERN(H5S_sel_iter_t);
+
/*-------------------------------------------------------------------------
* Function: H5FD_locate_signature
*
@@ -260,6 +311,1689 @@ done:
} /* end H5FD_write() */
/*-------------------------------------------------------------------------
+ * Function: H5FD_read_vector
+ *
+ * Purpose: Private version of H5FDread_vector()
+ *
+ * Perform count reads from the specified file at the offsets
+ * provided in the addrs array, with the lengths and memory
+ * types provided in the sizes and types arrays. Data read
+ * is returned in the buffers provided in the bufs array.
+ *
+ * If i > 0 and sizes[i] == 0, presume sizes[n] = sizes[i-1]
+ * for all n >= i and < count.
+ *
+ * Similarly, if i > 0 and types[i] == H5FD_MEM_NOLIST,
+ * presume types[n] = types[i-1] for all n >= i and < count.
+ *
+ * If the underlying VFD supports vector reads, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the vector read into a sequence
+ * of individual reads.
+ *
+ * Note that it is not in general possible to convert a
+ * vector read into a selection read, because each element
+ * in the vector read may have a different memory type.
+ * In contrast, selection reads are of a single type.
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ * the results havce been into the supplied
+ * buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ *
+ * Programmer: JRM -- 6/10/20
+ *
+ * Changes: None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *bufs[] /* out */)
+{
+ hbool_t addrs_cooked = FALSE;
+ hbool_t extend_sizes = FALSE;
+ hbool_t extend_types = FALSE;
+ uint32_t i;
+ size_t size;
+ H5FD_mem_t type;
+ hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(file->cls);
+ HDassert((types) || (count == 0));
+ HDassert((addrs) || (count == 0));
+ HDassert((sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* verify that the first elements of the sizes and types arrays are
+ * valid.
+ */
+ HDassert((count == 0) || (sizes[0] != 0));
+ HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+ /* Get proper DXPL for I/O */
+ dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+ /* The no-op case
+ *
+ * Do not return early for Parallel mode since the I/O could be a
+ * collective transfer.
+ */
+ if (0 == count) {
+ HGOTO_DONE(SUCCEED)
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ if (file->base_addr > 0) {
+
+ /* apply the base_addr offset to the addrs array. Must undo before
+ * we return.
+ */
+ for (i = 0; i < count; i++) {
+
+ addrs[i] += file->base_addr;
+ }
+ addrs_cooked = TRUE;
+ }
+
+ /* If the file is open for SWMR read access, allow access to data past
+ * the end of the allocated space (the 'eoa'). This is done because the
+ * eoa stored in the file's superblock might be out of sync with the
+ * objects being written within the file by the application performing
+ * SWMR write operations.
+ */
+ if ((!(file->access_flags & H5F_ACC_SWMR_READ)) && (count > 0)) {
+ haddr_t eoa;
+
+ extend_sizes = FALSE;
+ extend_types = FALSE;
+
+ for (i = 0; i < count; i++) {
+
+ if (!extend_sizes) {
+
+ if (sizes[i] == 0) {
+
+ extend_sizes = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+
+ size = sizes[i];
+ }
+ }
+
+ if (!extend_types) {
+
+ if (types[i] == H5FD_MEM_NOLIST) {
+
+ extend_types = TRUE;
+ type = types[i - 1];
+ }
+ else {
+
+ type = types[i];
+ }
+ }
+
+ if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+ if ((addrs[i] + size) > eoa)
+
+ HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL,
+ "addr overflow, addrs[%d] = %llu, sizes[%d] = %llu, eoa = %llu", (int)i,
+ (unsigned long long)(addrs[i]), (int)i, (unsigned long long)size,
+ (unsigned long long)eoa)
+ }
+ }
+
+ /* if the underlying VFD supports vector read, make the call */
+ if (file->cls->read_vector) {
+
+ if ((file->cls->read_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0)
+
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed")
+ }
+ else {
+
+ /* otherwise, implement the vector read as a sequence of regular
+ * read calls.
+ */
+ extend_sizes = FALSE;
+ extend_types = FALSE;
+
+ for (i = 0; i < count; i++) {
+
+ /* we have already verified that sizes[0] != 0 and
+ * types[0] != H5FD_MEM_NOLIST
+ */
+
+ if (!extend_sizes) {
+
+ if (sizes[i] == 0) {
+
+ extend_sizes = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+
+ size = sizes[i];
+ }
+ }
+
+ if (!extend_types) {
+
+ if (types[i] == H5FD_MEM_NOLIST) {
+
+ extend_types = TRUE;
+ type = types[i - 1];
+ }
+ else {
+
+ type = types[i];
+ }
+ }
+
+ if ((file->cls->read)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed")
+ }
+ }
+
+done:
+ /* undo the base addr offset to the addrs array if necessary */
+ if (addrs_cooked) {
+
+ HDassert(file->base_addr > 0);
+
+ for (i = 0; i < count; i++) {
+
+ addrs[i] -= file->base_addr;
+ }
+ }
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_read_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_write_vector
+ *
+ * Purpose: Private version of H5FDwrite_vector()
+ *
+ * Perform count writes to the specified file at the offsets
+ * provided in the addrs array, with the lengths and memory
+ * types provided in the sizes and types arrays. Data written
+ * is taken from the buffers provided in the bufs array.
+ *
+ * If i > 0 and sizes[i] == 0, presume sizes[n] = sizes[i-1]
+ * for all n >= i and < count.
+ *
+ * Similarly, if i > 0 and types[i] == H5FD_MEM_NOLIST,
+ * presume types[n] = types[i-1] for all n >= i and < count.
+ *
+ * If the underlying VFD supports vector writes, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the vector write into a sequence
+ * of individual writes.
+ *
+ * Note that it is not in general possible to convert a
+ * vector write into a selection write, because each element
+ * in the vector write may have a different memory type.
+ * In contrast, selection writes are of a single type.
+ *
+ * Return: Success: SUCCEED
+ * All writes have completed successfully.
+ *
+ * Failure: FAIL
+ * One or more writes failed.
+ *
+ * Programmer: JRM -- 6/10/20
+ *
+ * Changes: None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ const void *bufs[])
+{
+ hbool_t addrs_cooked = FALSE;
+ hbool_t extend_sizes = FALSE;
+ hbool_t extend_types = FALSE;
+ uint32_t i;
+ size_t size;
+ H5FD_mem_t type;
+ hid_t dxpl_id; /* DXPL for operation */
+ haddr_t eoa = HADDR_UNDEF; /* EOA for file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(file->cls);
+ HDassert((types) || (count == 0));
+ HDassert((addrs) || (count == 0));
+ HDassert((sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* verify that the first elements of the sizes and types arrays are
+ * valid.
+ */
+ HDassert((count == 0) || (sizes[0] != 0));
+ HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+ /* Get proper DXPL for I/O */
+ dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+ /* The no-op case
+ *
+ * Do not return early for Parallel mode since the I/O could be a
+ * collective transfer.
+ */
+ if (0 == count)
+ HGOTO_DONE(SUCCEED)
+#endif /* H5_HAVE_PARALLEL */
+
+ if (file->base_addr > 0) {
+
+ /* apply the base_addr offset to the addrs array. Must undo before
+ * we return.
+ */
+ for (i = 0; i < count; i++) {
+
+ addrs[i] += file->base_addr;
+ }
+ addrs_cooked = TRUE;
+ }
+
+ extend_sizes = FALSE;
+ extend_types = FALSE;
+
+ for (i = 0; i < count; i++) {
+
+ if (!extend_sizes) {
+
+ if (sizes[i] == 0) {
+
+ extend_sizes = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+
+ size = sizes[i];
+ }
+ }
+
+ if (!extend_types) {
+
+ if (types[i] == H5FD_MEM_NOLIST) {
+
+ extend_types = TRUE;
+ type = types[i - 1];
+ }
+ else {
+
+ type = types[i];
+ }
+ }
+
+ if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+ if ((addrs[i] + size) > eoa)
+
+ HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addrs[%d] = %llu, sizes[%d] = %llu, \
+ eoa = %llu",
+ (int)i, (unsigned long long)(addrs[i]), (int)i, (unsigned long long)size,
+ (unsigned long long)eoa)
+ }
+
+ /* if the underlying VFD supports vector write, make the call */
+ if (file->cls->write_vector) {
+
+ if ((file->cls->write_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0)
+
+ HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed")
+ }
+ else {
+ /* otherwise, implement the vector write as a sequence of regular
+ * write calls.
+ */
+ extend_sizes = FALSE;
+ extend_types = FALSE;
+
+ for (i = 0; i < count; i++) {
+
+ /* we have already verified that sizes[0] != 0 and
+ * types[0] != H5FD_MEM_NOLIST
+ */
+
+ if (!extend_sizes) {
+
+ if (sizes[i] == 0) {
+
+ extend_sizes = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+
+ size = sizes[i];
+ }
+ }
+
+ if (!extend_types) {
+
+ if (types[i] == H5FD_MEM_NOLIST) {
+
+ extend_types = TRUE;
+ type = types[i - 1];
+ }
+ else {
+
+ type = types[i];
+ }
+ }
+
+ if ((file->cls->write)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver write request failed")
+ }
+ }
+
+done:
+ /* undo the base addr offset to the addrs array if necessary */
+ if (addrs_cooked) {
+
+ HDassert(file->base_addr > 0);
+
+ for (i = 0; i < count; i++) {
+
+ addrs[i] -= file->base_addr;
+ }
+ }
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_write_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD__read_selection_translate
+ *
+ * Purpose: Translates a selection read call to a vector read call if
+ * vector reads are supported, or a series of scalar read
+ * calls otherwise.
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ * the results have been read into the supplied
+ * buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ *
+ * Programmer: NAF -- 5/13/21
+ *
+ * Changes: None
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+ H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[],
+ size_t element_sizes[], void *bufs[] /* out */)
+{
+ hbool_t extend_sizes = FALSE;
+ hbool_t extend_bufs = FALSE;
+ uint32_t i;
+ size_t element_size;
+ void * buf;
+ hbool_t use_vector = FALSE;
+ haddr_t addrs_local[H5FD_LOCAL_VECTOR_LEN];
+ haddr_t * addrs = addrs_local;
+ size_t sizes_local[H5FD_LOCAL_VECTOR_LEN];
+ size_t * sizes = sizes_local;
+ void * vec_bufs_local[H5FD_LOCAL_VECTOR_LEN];
+ void ** vec_bufs = vec_bufs_local;
+ hsize_t file_off[H5FD_SEQ_LIST_LEN];
+ size_t file_len[H5FD_SEQ_LIST_LEN];
+ hsize_t mem_off[H5FD_SEQ_LIST_LEN];
+ size_t mem_len[H5FD_SEQ_LIST_LEN];
+ size_t file_seq_i;
+ size_t mem_seq_i;
+ size_t file_nseq;
+ size_t mem_nseq;
+ size_t io_len;
+ size_t nelmts;
+ hssize_t hss_nelmts;
+ size_t seq_nelem;
+ H5S_sel_iter_t *file_iter = NULL;
+ H5S_sel_iter_t *mem_iter = NULL;
+ hbool_t file_iter_init = FALSE;
+ hbool_t mem_iter_init = FALSE;
+ H5FD_mem_t types[2] = {type, H5FD_MEM_NOLIST};
+ size_t vec_arr_nalloc = H5FD_LOCAL_VECTOR_LEN;
+ size_t vec_arr_nused = 0;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(file->cls);
+ HDassert(mem_spaces);
+ HDassert(file_spaces);
+ HDassert(offsets);
+ HDassert(element_sizes);
+ HDassert(bufs);
+
+ /* Verify that the first elements of the element_sizes and bufs arrays are
+ * valid. */
+ HDassert(element_sizes[0] != 0);
+ HDassert(bufs[0] != NULL);
+
+ /* Check if we're using vector I/O */
+ use_vector = file->cls->read_vector != NULL;
+
+ /* Allocate sequence lists for memory and file spaces */
+ if (NULL == (file_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate file selection iterator")
+ if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate memory selection iterator")
+
+ /* Loop over dataspaces */
+ for (i = 0; i < count; i++) {
+
+ /* we have already verified that element_sizes[0] != 0 and bufs[0]
+ * != NULL */
+
+ if (!extend_sizes) {
+
+ if (element_sizes[i] == 0) {
+
+ extend_sizes = TRUE;
+ element_size = element_sizes[i - 1];
+ }
+ else {
+
+ element_size = element_sizes[i];
+ }
+ }
+
+ if (!extend_bufs) {
+
+ if (bufs[i] == NULL) {
+
+ extend_bufs = TRUE;
+ buf = bufs[i - 1];
+ }
+ else {
+
+ buf = bufs[i];
+ }
+ }
+
+ /* Initialize sequence lists for memory and file spaces */
+ if (H5S_select_iter_init(file_iter, file_spaces[i], element_size, 0) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for file space")
+ file_iter_init = TRUE;
+ if (H5S_select_iter_init(mem_iter, mem_spaces[i], element_size, 0) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for memory space")
+ mem_iter_init = TRUE;
+
+ /* Get the number of elements in selection */
+ if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(file_spaces[i])) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+ H5_CHECKED_ASSIGN(nelmts, size_t, hss_nelmts, hssize_t);
+
+#ifndef NDEBUG
+ /* Verify mem space has the same number of elements */
+ {
+ if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(mem_spaces[i])) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+ HDassert((hssize_t)nelmts == hss_nelmts);
+ }
+#endif /* NDEBUG */
+
+ /* Initialize values so sequence lists are retrieved on the first
+ * iteration */
+ file_seq_i = H5FD_SEQ_LIST_LEN;
+ mem_seq_i = H5FD_SEQ_LIST_LEN;
+ file_nseq = 0;
+ mem_nseq = 0;
+
+ /* Loop until all elements are processed */
+ while (file_seq_i < file_nseq || nelmts > 0) {
+ /* Fill/refill file sequence list if necessary */
+ if (file_seq_i == H5FD_SEQ_LIST_LEN) {
+ if (H5S_SELECT_ITER_GET_SEQ_LIST(file_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &file_nseq,
+ &seq_nelem, file_off, file_len) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+ HDassert(file_nseq > 0);
+
+ nelmts -= seq_nelem;
+ file_seq_i = 0;
+ }
+ HDassert(file_seq_i < file_nseq);
+
+ /* Fill/refill memory sequence list if necessary */
+ if (mem_seq_i == H5FD_SEQ_LIST_LEN) {
+ if (H5S_SELECT_ITER_GET_SEQ_LIST(mem_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &mem_nseq, &seq_nelem,
+ mem_off, mem_len) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+ HDassert(mem_nseq > 0);
+
+ mem_seq_i = 0;
+ }
+ HDassert(mem_seq_i < mem_nseq);
+
+ /* Calculate length of this IO */
+ io_len = MIN(file_len[file_seq_i], mem_len[mem_seq_i]);
+
+ /* Check if we're using vector I/O */
+ if (use_vector) {
+ /* Check if we need to extend the arrays */
+ if (vec_arr_nused == vec_arr_nalloc) {
+ /* Check if we're using the static arrays */
+ if (addrs == addrs_local) {
+ HDassert(sizes == sizes_local);
+ HDassert(vec_bufs == vec_bufs_local);
+
+ /* Allocate dynamic arrays */
+ if (NULL == (addrs = H5MM_malloc(sizeof(addrs_local) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for address list")
+ if (NULL == (sizes = H5MM_malloc(sizeof(sizes_local) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for size list")
+ if (NULL == (vec_bufs = H5MM_malloc(sizeof(vec_bufs_local) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory allocation failed for buffer list")
+
+ /* Copy the existing data */
+ (void)H5MM_memcpy(addrs, addrs_local, sizeof(addrs_local));
+ (void)H5MM_memcpy(sizes, sizes_local, sizeof(sizes_local));
+ (void)H5MM_memcpy(vec_bufs, vec_bufs_local, sizeof(vec_bufs_local));
+ }
+ else {
+ void *tmp_ptr;
+
+ /* Reallocate arrays */
+ if (NULL == (tmp_ptr = H5MM_realloc(addrs, vec_arr_nalloc * sizeof(*addrs) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory reallocation failed for address list")
+ addrs = tmp_ptr;
+ if (NULL == (tmp_ptr = H5MM_realloc(sizes, vec_arr_nalloc * sizeof(*sizes) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory reallocation failed for size list")
+ sizes = tmp_ptr;
+ if (NULL ==
+ (tmp_ptr = H5MM_realloc(vec_bufs, vec_arr_nalloc * sizeof(*vec_bufs) * 2)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "memory reallocation failed for buffer list")
+ vec_bufs = tmp_ptr;
+ }
+
+ /* Record that we've doubled the array sizes */
+ vec_arr_nalloc *= 2;
+ }
+
+ /* Add this segment to vector read list */
+ addrs[vec_arr_nused] = offsets[i] + file_off[file_seq_i];
+ sizes[vec_arr_nused] = io_len;
+ vec_bufs[vec_arr_nused] = (void *)((uint8_t *)buf + mem_off[mem_seq_i]);
+ vec_arr_nused++;
+ }
+ else
+ /* Issue scalar read call */
+ if ((file->cls->read)(file, type, dxpl_id, offsets[i] + file_off[file_seq_i], io_len,
+ (void *)((uint8_t *)buf + mem_off[mem_seq_i])) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed")
+
+ /* Update file sequence */
+ if (io_len == file_len[file_seq_i])
+ file_seq_i++;
+ else {
+ file_off[file_seq_i] += io_len;
+ file_len[file_seq_i] -= io_len;
+ }
+
+ /* Update memory sequence */
+ if (io_len == mem_len[mem_seq_i])
+ mem_seq_i++;
+ else {
+ mem_off[mem_seq_i] += io_len;
+ mem_len[mem_seq_i] -= io_len;
+ }
+ }
+
+ /* Make sure both memory and file sequences terminated at the same time */
+ if (mem_seq_i < mem_nseq)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "file selection terminated before memory selection")
+
+ /* Terminate iterators */
+ if (H5S_SELECT_ITER_RELEASE(file_iter) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator")
+ file_iter_init = FALSE;
+ if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator")
+ mem_iter_init = FALSE;
+ }
+
+ /* Issue vector read call if appropriate */
+ if (use_vector) {
+ H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t)
+ if ((file->cls->read_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) <
+ 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed")
+ }
+
+done:
+ /* Terminate and free iterators */
+ if (file_iter) {
+ if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator")
+ file_iter = H5FL_FREE(H5S_sel_iter_t, file_iter);
+ }
+ if (mem_iter) {
+ if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator")
+ mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter);
+ }
+
+ /* Cleanup vector arrays */
+ if (use_vector) {
+ if (addrs != addrs_local)
+ addrs = H5MM_xfree(addrs);
+ if (sizes != sizes_local)
+ sizes = H5MM_xfree(sizes);
+ if (vec_bufs != vec_bufs_local)
+ vec_bufs = H5MM_xfree(vec_bufs);
+ }
+
+ /* Make sure we cleaned up */
+ HDassert(!addrs || addrs == addrs_local);
+ HDassert(!sizes || sizes == sizes_local);
+ HDassert(!vec_bufs || vec_bufs == vec_bufs_local);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD__read_selection_translate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_read_selection
+ *
+ * Purpose: Private version of H5FDread_selection()
+ *
+ * Perform count reads from the specified file at the
+ * locations selected in the dataspaces in the file_spaces
+ * array, with each of those dataspaces starting at the file
+ * address specified by the corresponding element of the
+ * offsets array, and with the size of each element in the
+ * dataspace specified by the corresponding element of the
+ * element_sizes array. The memory type provided by type is
+ * the same for all selections. Data read is returned in
+ * the locations selected in the dataspaces in the
+ * mem_spaces array, within the buffers provided in the
+ * corresponding elements of the bufs array.
+ *
+ * If i > 0 and element_sizes[i] == 0, presume
+ * element_sizes[n] = element_sizes[i-1] for all n >= i and
+ * < count.
+ *
+ * If the underlying VFD supports selection reads, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the vector read into a sequence
+ * of individual reads.
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ * the results have been read into the supplied
+ * buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ *
+ * Programmer: NAF -- 3/29/21
+ *
+ * Changes: None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, H5S_t **file_spaces,
+ haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */)
+{
+ hbool_t offsets_cooked = FALSE;
+ hid_t mem_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN];
+ hid_t * mem_space_ids = mem_space_ids_local;
+ hid_t file_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN];
+ hid_t * file_space_ids = file_space_ids_local;
+ uint32_t num_spaces = 0;
+ hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */
+ uint32_t i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(file->cls);
+ HDassert((mem_spaces) || (count == 0));
+ HDassert((file_spaces) || (count == 0));
+ HDassert((offsets) || (count == 0));
+ HDassert((element_sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* Verify that the first elements of the element_sizes and bufs arrays are
+ * valid. */
+ HDassert((count == 0) || (element_sizes[0] != 0));
+ HDassert((count == 0) || (bufs[0] != NULL));
+
+ /* Get proper DXPL for I/O */
+ dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+ /* The no-op case
+ *
+ * Do not return early for Parallel mode since the I/O could be a
+ * collective transfer.
+ */
+ if (0 == count) {
+ HGOTO_DONE(SUCCEED)
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ if (file->base_addr > 0) {
+
+ /* apply the base_addr offset to the offsets array. Must undo before
+ * we return.
+ */
+ for (i = 0; i < count; i++) {
+
+ offsets[i] += file->base_addr;
+ }
+ offsets_cooked = TRUE;
+ }
+
+ /* If the file is open for SWMR read access, allow access to data past
+ * the end of the allocated space (the 'eoa'). This is done because the
+ * eoa stored in the file's superblock might be out of sync with the
+ * objects being written within the file by the application performing
+ * SWMR write operations.
+ */
+ /* For now at least, only check that the offset is not past the eoa, since
+ * looking into the highest offset in the selection (different from the
+ * bounds) is potentially expensive.
+ */
+ if (!(file->access_flags & H5F_ACC_SWMR_READ)) {
+ haddr_t eoa;
+
+ if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+ for (i = 0; i < count; i++) {
+
+ if ((offsets[i]) > eoa)
+
+ HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu",
+ (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa)
+ }
+ }
+
+ /* if the underlying VFD supports selection read, make the call */
+ if (file->cls->read_selection) {
+ /* Allocate array of space IDs if necessary, otherwise use local
+ * buffers */
+ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) {
+ if (NULL == (mem_space_ids = H5MM_malloc(count * sizeof(hid_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+ if (NULL == (file_space_ids = H5MM_malloc(count * sizeof(hid_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+ }
+
+ /* Create IDs for all dataspaces */
+ for (; num_spaces < count; num_spaces++) {
+ if ((mem_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, mem_spaces[num_spaces], TRUE)) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID")
+
+ if ((file_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, file_spaces[num_spaces], TRUE)) <
+ 0) {
+ if (H5I_dec_app_ref(mem_space_ids[num_spaces]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID")
+ }
+ }
+
+ if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed")
+ }
+ else
+ /* Otherwise, implement the selection read as a sequence of regular
+ * or vector read calls.
+ */
+ if (H5FD__read_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "translation to vector or scalar read failed")
+
+done:
+ /* undo the base addr offset to the offsets array if necessary */
+ if (offsets_cooked) {
+
+ HDassert(file->base_addr > 0);
+
+ for (i = 0; i < count; i++) {
+
+ offsets[i] -= file->base_addr;
+ }
+ }
+
+ /* Cleanup dataspace arrays */
+ for (i = 0; i < num_spaces; i++) {
+ if (H5I_dec_app_ref(mem_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ if (H5I_dec_app_ref(file_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ }
+ if (mem_space_ids != mem_space_ids_local)
+ mem_space_ids = H5MM_xfree(mem_space_ids);
+ if (file_space_ids != file_space_ids_local)
+ file_space_ids = H5MM_xfree(file_space_ids);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_read_selection() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_read_selection_id
+ *
+ * Purpose:     Like H5FD_read_selection(), but takes hid_t arrays instead
+ *              of H5S_t * arrays for the dataspaces.
+ *
+ * Return:      Success:    SUCCEED
+ *                          All reads have completed successfully, and
+ *                          the results have been read into the supplied
+ *                          buffers.
+ *
+ *              Failure:    FAIL
+ *                          The contents of supplied buffers are undefined.
+ *
+ * Programmer:  NAF -- 5/19/21
+ *
+ * Changes:     None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_read_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[],
+                       hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[],
+                       void *bufs[] /* out */)
+{
+    hbool_t  offsets_cooked = FALSE;
+    H5S_t *  mem_spaces_local[H5FD_LOCAL_SEL_ARR_LEN];
+    H5S_t ** mem_spaces = mem_spaces_local;
+    H5S_t *  file_spaces_local[H5FD_LOCAL_SEL_ARR_LEN];
+    H5S_t ** file_spaces = file_spaces_local;
+    hid_t    dxpl_id     = H5I_INVALID_HID; /* DXPL for operation */
+    uint32_t i;
+    herr_t   ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(file);
+    HDassert(file->cls);
+    HDassert((mem_space_ids) || (count == 0));
+    HDassert((file_space_ids) || (count == 0));
+    HDassert((offsets) || (count == 0));
+    HDassert((element_sizes) || (count == 0));
+    HDassert((bufs) || (count == 0));
+
+    /* Verify that the first elements of the element_sizes and bufs arrays are
+     * valid. */
+    HDassert((count == 0) || (element_sizes[0] != 0));
+    HDassert((count == 0) || (bufs[0] != NULL));
+
+    /* Get proper DXPL for I/O */
+    dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+    /* The no-op case
+     *
+     * Do not return early for Parallel mode since the I/O could be a
+     * collective transfer.
+     */
+    if (0 == count) {
+        HGOTO_DONE(SUCCEED)
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    if (file->base_addr > 0) {
+
+        /* apply the base_addr offset to the offsets array.  Must undo before
+         * we return.
+         */
+        for (i = 0; i < count; i++) {
+
+            offsets[i] += file->base_addr;
+        }
+        offsets_cooked = TRUE;
+    }
+
+    /* If the file is open for SWMR read access, allow access to data past
+     * the end of the allocated space (the 'eoa').  This is done because the
+     * eoa stored in the file's superblock might be out of sync with the
+     * objects being written within the file by the application performing
+     * SWMR write operations.
+     */
+    /* For now at least, only check that the offset is not past the eoa, since
+     * looking into the highest offset in the selection (different from the
+     * bounds) is potentially expensive.
+     */
+    if (!(file->access_flags & H5F_ACC_SWMR_READ)) {
+        haddr_t eoa;
+
+        if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+            HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+        for (i = 0; i < count; i++) {
+
+            if ((offsets[i]) > eoa)
+
+                HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu",
+                            (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa)
+        }
+    }
+
+    /* if the underlying VFD supports selection read, make the call */
+    if (file->cls->read_selection) {
+        if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+                                        element_sizes, bufs) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed")
+    }
+    else {
+        /* Otherwise, implement the selection read as a sequence of regular
+         * or vector read calls.
+         */
+
+        /* Allocate arrays of space objects if necessary, otherwise use local
+         * buffers */
+        if (count > sizeof(mem_spaces_local) / sizeof(mem_spaces_local[0])) {
+            if (NULL == (mem_spaces = H5MM_malloc(count * sizeof(H5S_t *))))
+                HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+            if (NULL == (file_spaces = H5MM_malloc(count * sizeof(H5S_t *))))
+                HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+        }
+
+        /* Get object pointers for all dataspaces.  Note this function returns
+         * herr_t, so failures here must return FAIL, not H5I_INVALID_HID. */
+        for (i = 0; i < count; i++) {
+            if (NULL == (mem_spaces[i] = (H5S_t *)H5I_object_verify(mem_space_ids[i], H5I_DATASPACE)))
+                HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, FAIL, "can't retrieve memory dataspace from ID")
+            if (NULL == (file_spaces[i] = (H5S_t *)H5I_object_verify(file_space_ids[i], H5I_DATASPACE)))
+                HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, FAIL, "can't retrieve file dataspace from ID")
+        }
+
+        /* Translate to vector or scalar I/O */
+        if (H5FD__read_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets,
+                                           element_sizes, bufs) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "translation to vector or scalar read failed")
+    }
+
+done:
+    /* undo the base addr offset to the offsets array if necessary */
+    if (offsets_cooked) {
+
+        HDassert(file->base_addr > 0);
+
+        for (i = 0; i < count; i++) {
+
+            offsets[i] -= file->base_addr;
+        }
+    }
+
+    /* Cleanup dataspace arrays */
+    if (mem_spaces != mem_spaces_local)
+        mem_spaces = H5MM_xfree(mem_spaces);
+    if (file_spaces != file_spaces_local)
+        file_spaces = H5MM_xfree(file_spaces);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_read_selection_id() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__write_selection_translate
+ *
+ * Purpose:     Translates a selection write call to a vector write call
+ *              if vector writes are supported, or a series of scalar
+ *              write calls otherwise.
+ *
+ * Return:      Success:    SUCCEED
+ *                          All writes have completed successfully.
+ *
+ *              Failure:    FAIL
+ *                          One or more writes failed.
+ *
+ * Programmer:  NAF -- 5/13/21
+ *
+ * Changes:     None
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count,
+                                H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[],
+                                size_t element_sizes[], const void *bufs[])
+{
+    hbool_t         extend_sizes = FALSE;
+    hbool_t         extend_bufs  = FALSE;
+    uint32_t        i;
+    size_t          element_size;
+    const void *    buf;
+    hbool_t         use_vector = FALSE;
+    haddr_t         addrs_local[H5FD_LOCAL_VECTOR_LEN];
+    haddr_t *       addrs = addrs_local;
+    size_t          sizes_local[H5FD_LOCAL_VECTOR_LEN];
+    size_t *        sizes = sizes_local;
+    const void *    vec_bufs_local[H5FD_LOCAL_VECTOR_LEN];
+    const void **   vec_bufs = vec_bufs_local;
+    hsize_t         file_off[H5FD_SEQ_LIST_LEN];
+    size_t          file_len[H5FD_SEQ_LIST_LEN];
+    hsize_t         mem_off[H5FD_SEQ_LIST_LEN];
+    size_t          mem_len[H5FD_SEQ_LIST_LEN];
+    size_t          file_seq_i;
+    size_t          mem_seq_i;
+    size_t          file_nseq;
+    size_t          mem_nseq;
+    size_t          io_len;
+    size_t          nelmts;
+    hssize_t        hss_nelmts;
+    size_t          seq_nelem;
+    H5S_sel_iter_t *file_iter      = NULL;
+    H5S_sel_iter_t *mem_iter       = NULL;
+    hbool_t         file_iter_init = FALSE;
+    hbool_t         mem_iter_init  = FALSE;
+    H5FD_mem_t      types[2]       = {type, H5FD_MEM_NOLIST};
+    size_t          vec_arr_nalloc = H5FD_LOCAL_VECTOR_LEN;
+    size_t          vec_arr_nused  = 0;
+    herr_t          ret_value      = SUCCEED;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(file);
+    HDassert(file->cls);
+    HDassert(mem_spaces);
+    HDassert(file_spaces);
+    HDassert(offsets);
+    HDassert(element_sizes);
+    HDassert(bufs);
+
+    /* Verify that the first elements of the element_sizes and bufs arrays are
+     * valid. */
+    HDassert(element_sizes[0] != 0);
+    HDassert(bufs[0] != NULL);
+
+    /* Check if we're using vector I/O */
+    use_vector = file->cls->write_vector != NULL;
+
+    /* Allocate sequence lists for memory and file spaces */
+    if (NULL == (file_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+        HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate file selection iterator")
+    if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t)))
+        HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate memory selection iterator")
+
+    /* Loop over dataspaces */
+    for (i = 0; i < count; i++) {
+
+        /* we have already verified that element_sizes[0] != 0 and bufs[0]
+         * != NULL */
+
+        /* A zero size / NULL buf means "repeat the previous value for the
+         * rest of the arrays" (fixed size / single buffer optimization) */
+        if (!extend_sizes) {
+
+            if (element_sizes[i] == 0) {
+
+                extend_sizes = TRUE;
+                element_size = element_sizes[i - 1];
+            }
+            else {
+
+                element_size = element_sizes[i];
+            }
+        }
+
+        if (!extend_bufs) {
+
+            if (bufs[i] == NULL) {
+
+                extend_bufs = TRUE;
+                buf         = bufs[i - 1];
+            }
+            else {
+
+                buf = bufs[i];
+            }
+        }
+
+        /* Initialize sequence lists for memory and file spaces */
+        if (H5S_select_iter_init(file_iter, file_spaces[i], element_size, 0) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for file space")
+        file_iter_init = TRUE;
+        if (H5S_select_iter_init(mem_iter, mem_spaces[i], element_size, 0) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for memory space")
+        mem_iter_init = TRUE;
+
+        /* Get the number of elements in selection */
+        if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(file_spaces[i])) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+        H5_CHECKED_ASSIGN(nelmts, size_t, hss_nelmts, hssize_t);
+
+#ifndef NDEBUG
+        /* Verify mem space has the same number of elements */
+        {
+            if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(mem_spaces[i])) < 0)
+                HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")
+            HDassert((hssize_t)nelmts == hss_nelmts);
+        }
+#endif /* NDEBUG */
+
+        /* Initialize values so sequence lists are retrieved on the first
+         * iteration */
+        file_seq_i = H5FD_SEQ_LIST_LEN;
+        mem_seq_i  = H5FD_SEQ_LIST_LEN;
+        file_nseq  = 0;
+        mem_nseq   = 0;
+
+        /* Loop until all elements are processed */
+        while (file_seq_i < file_nseq || nelmts > 0) {
+            /* Fill/refill file sequence list if necessary */
+            if (file_seq_i == H5FD_SEQ_LIST_LEN) {
+                if (H5S_SELECT_ITER_GET_SEQ_LIST(file_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &file_nseq,
+                                                 &seq_nelem, file_off, file_len) < 0)
+                    HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+                HDassert(file_nseq > 0);
+
+                nelmts -= seq_nelem;
+                file_seq_i = 0;
+            }
+            HDassert(file_seq_i < file_nseq);
+
+            /* Fill/refill memory sequence list if necessary */
+            if (mem_seq_i == H5FD_SEQ_LIST_LEN) {
+                if (H5S_SELECT_ITER_GET_SEQ_LIST(mem_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &mem_nseq, &seq_nelem,
+                                                 mem_off, mem_len) < 0)
+                    HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed")
+                HDassert(mem_nseq > 0);
+
+                mem_seq_i = 0;
+            }
+            HDassert(mem_seq_i < mem_nseq);
+
+            /* Calculate length of this IO */
+            io_len = MIN(file_len[file_seq_i], mem_len[mem_seq_i]);
+
+            /* Check if we're using vector I/O */
+            if (use_vector) {
+                /* Check if we need to extend the arrays */
+                if (vec_arr_nused == vec_arr_nalloc) {
+                    /* Check if we're using the static arrays */
+                    if (addrs == addrs_local) {
+                        HDassert(sizes == sizes_local);
+                        HDassert(vec_bufs == vec_bufs_local);
+
+                        /* Allocate dynamic arrays */
+                        if (NULL == (addrs = H5MM_malloc(sizeof(addrs_local) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory allocation failed for address list")
+                        if (NULL == (sizes = H5MM_malloc(sizeof(sizes_local) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory allocation failed for size list")
+                        if (NULL == (vec_bufs = H5MM_malloc(sizeof(vec_bufs_local) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory allocation failed for buffer list")
+
+                        /* Copy the existing data */
+                        (void)H5MM_memcpy(addrs, addrs_local, sizeof(addrs_local));
+                        (void)H5MM_memcpy(sizes, sizes_local, sizeof(sizes_local));
+                        (void)H5MM_memcpy(vec_bufs, vec_bufs_local, sizeof(vec_bufs_local));
+                    }
+                    else {
+                        void *tmp_ptr;
+
+                        /* Reallocate arrays */
+                        if (NULL == (tmp_ptr = H5MM_realloc(addrs, vec_arr_nalloc * sizeof(*addrs) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory reallocation failed for address list")
+                        addrs = tmp_ptr;
+                        if (NULL == (tmp_ptr = H5MM_realloc(sizes, vec_arr_nalloc * sizeof(*sizes) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory reallocation failed for size list")
+                        sizes = tmp_ptr;
+                        if (NULL ==
+                            (tmp_ptr = H5MM_realloc(vec_bufs, vec_arr_nalloc * sizeof(*vec_bufs) * 2)))
+                            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+                                        "memory reallocation failed for buffer list")
+                        vec_bufs = tmp_ptr;
+                    }
+
+                    /* Record that we've doubled the array sizes */
+                    vec_arr_nalloc *= 2;
+                }
+
+                /* Add this segment to vector write list */
+                addrs[vec_arr_nused]    = offsets[i] + file_off[file_seq_i];
+                sizes[vec_arr_nused]    = io_len;
+                vec_bufs[vec_arr_nused] = (const void *)((const uint8_t *)buf + mem_off[mem_seq_i]);
+                vec_arr_nused++;
+            }
+            else
+                /* Issue scalar write call */
+                if ((file->cls->write)(file, type, dxpl_id, offsets[i] + file_off[file_seq_i], io_len,
+                                       (const void *)((const uint8_t *)buf + mem_off[mem_seq_i])) < 0)
+                HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed")
+
+            /* Update file sequence */
+            if (io_len == file_len[file_seq_i])
+                file_seq_i++;
+            else {
+                file_off[file_seq_i] += io_len;
+                file_len[file_seq_i] -= io_len;
+            }
+
+            /* Update memory sequence */
+            if (io_len == mem_len[mem_seq_i])
+                mem_seq_i++;
+            else {
+                mem_off[mem_seq_i] += io_len;
+                mem_len[mem_seq_i] -= io_len;
+            }
+        }
+
+        /* Make sure both memory and file sequences terminated at the same time */
+        if (mem_seq_i < mem_nseq)
+            HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "file selection terminated before memory selection")
+
+        /* Terminate iterators */
+        if (H5S_SELECT_ITER_RELEASE(file_iter) < 0)
+            HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator")
+        file_iter_init = FALSE;
+        if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+            HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator")
+        mem_iter_init = FALSE;
+    }
+
+    /* Issue vector write call if appropriate */
+    if (use_vector) {
+        H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t)
+        if ((file->cls->write_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) <
+            0)
+            HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed")
+    }
+
+done:
+    /* Terminate and free iterators.  Note: must use HDONE_ERROR (not
+     * HGOTO_ERROR) after the done label -- HGOTO_ERROR would jump back to
+     * "done" and retry the failing release in an infinite loop.
+     */
+    if (file_iter) {
+        if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0)
+            HDONE_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator")
+        file_iter = H5FL_FREE(H5S_sel_iter_t, file_iter);
+    }
+    if (mem_iter) {
+        if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0)
+            HDONE_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator")
+        mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter);
+    }
+
+    /* Cleanup vector arrays */
+    if (use_vector) {
+        if (addrs != addrs_local)
+            addrs = H5MM_xfree(addrs);
+        if (sizes != sizes_local)
+            sizes = H5MM_xfree(sizes);
+        if (vec_bufs != vec_bufs_local)
+            vec_bufs = H5MM_xfree(vec_bufs);
+    }
+
+    /* Make sure we cleaned up */
+    HDassert(!addrs || addrs == addrs_local);
+    HDassert(!sizes || sizes == sizes_local);
+    HDassert(!vec_bufs || vec_bufs == vec_bufs_local);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD__write_selection_translate() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_write_selection
+ *
+ * Purpose: Private version of H5FDwrite_selection()
+ *
+ * Perform count writes to the specified file at the
+ * locations selected in the dataspaces in the file_spaces
+ * array, with each of those dataspaces starting at the file
+ * address specified by the corresponding element of the
+ * offsets array, and with the size of each element in the
+ * dataspace specified by the corresponding element of the
+ * element_sizes array. The memory type provided by type is
+ * the same for all selections. Data write is from
+ * the locations selected in the dataspaces in the
+ * mem_spaces array, within the buffers provided in the
+ * corresponding elements of the bufs array.
+ *
+ * If i > 0 and element_sizes[i] == 0, presume
+ * element_sizes[n] = element_sizes[i-1] for all n >= i and
+ * < count.
+ *
+ * If the underlying VFD supports selection writes, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the vector write into a sequence
+ * of individual writes.
+ *
+ * Return: Success: SUCCEED
+ * All writes have completed successfully.
+ *
+ * Failure: FAIL
+ * One or more writes failed.
+ *
+ * Programmer: NAF -- 3/29/21
+ *
+ * Changes: None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, H5S_t **file_spaces,
+ haddr_t offsets[], size_t element_sizes[], const void *bufs[])
+{
+ hbool_t offsets_cooked = FALSE;
+ /* Stack-allocated ID arrays used when count is small enough; heap
+ * replacements are allocated below otherwise */
+ hid_t mem_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN];
+ hid_t * mem_space_ids = mem_space_ids_local;
+ hid_t file_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN];
+ hid_t * file_space_ids = file_space_ids_local;
+ /* Number of mem/file ID pairs successfully registered -- drives the
+ * cleanup loop in the done section */
+ uint32_t num_spaces = 0;
+ hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */
+ uint32_t i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(file->cls);
+ HDassert((mem_spaces) || (count == 0));
+ HDassert((file_spaces) || (count == 0));
+ HDassert((offsets) || (count == 0));
+ HDassert((element_sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* Verify that the first elements of the element_sizes and bufs arrays are
+ * valid. */
+ HDassert((count == 0) || (element_sizes[0] != 0));
+ HDassert((count == 0) || (bufs[0] != NULL));
+
+ /* Get proper DXPL for I/O */
+ dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+ /* The no-op case
+ *
+ * Do not return early for Parallel mode since the I/O could be a
+ * collective transfer.
+ */
+ if (0 == count) {
+ HGOTO_DONE(SUCCEED)
+ }
+#endif /* H5_HAVE_PARALLEL */
+
+ if (file->base_addr > 0) {
+
+ /* apply the base_addr offset to the offsets array. Must undo before
+ * we return.
+ */
+ for (i = 0; i < count; i++) {
+
+ offsets[i] += file->base_addr;
+ }
+ offsets_cooked = TRUE;
+ }
+
+ /* For now at least, only check that the offset is not past the eoa, since
+ * looking into the highest offset in the selection (different from the
+ * bounds) is potentially expensive.
+ */
+ /* Note: unlike H5FD_read_selection(), this check is unconditional --
+ * there is no SWMR exemption for writes */
+ {
+ haddr_t eoa;
+
+ if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+ for (i = 0; i < count; i++) {
+
+ if ((offsets[i]) > eoa)
+
+ HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu",
+ (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa)
+ }
+ }
+
+ /* if the underlying VFD supports selection write, make the call */
+ if (file->cls->write_selection) {
+ /* Allocate array of space IDs if necessary, otherwise use local
+ * buffers */
+ if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) {
+ if (NULL == (mem_space_ids = H5MM_malloc(count * sizeof(hid_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+ if (NULL == (file_space_ids = H5MM_malloc(count * sizeof(hid_t))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+ }
+
+ /* Create IDs for all dataspaces */
+ /* The VFD selection-write callback takes dataspace IDs rather than
+ * H5S_t pointers, so wrap each object in a temporary ID here; the
+ * app references are dropped in the cleanup below */
+ for (; num_spaces < count; num_spaces++) {
+ if ((mem_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, mem_spaces[num_spaces], TRUE)) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID")
+
+ if ((file_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, file_spaces[num_spaces], TRUE)) <
+ 0) {
+ /* Drop the just-registered mem ID so num_spaces only counts
+ * fully-registered pairs */
+ if (H5I_dec_app_ref(mem_space_ids[num_spaces]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID")
+ }
+ }
+
+ if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed")
+ }
+ else
+ /* Otherwise, implement the selection write as a sequence of regular
+ * or vector write calls.
+ */
+ if (H5FD__write_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "translation to vector or scalar write failed")
+
+done:
+ /* undo the base addr offset to the offsets array if necessary */
+ if (offsets_cooked) {
+
+ HDassert(file->base_addr > 0);
+
+ for (i = 0; i < count; i++) {
+
+ offsets[i] -= file->base_addr;
+ }
+ }
+
+ /* Cleanup dataspace arrays */
+ /* Release the app refs for every fully-registered ID pair, then free
+ * the heap arrays if they were used */
+ for (i = 0; i < num_spaces; i++) {
+ if (H5I_dec_app_ref(mem_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ if (H5I_dec_app_ref(file_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ }
+ if (mem_space_ids != mem_space_ids_local)
+ mem_space_ids = H5MM_xfree(mem_space_ids);
+ if (file_space_ids != file_space_ids_local)
+ file_space_ids = H5MM_xfree(file_space_ids);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_write_selection() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_write_selection_id
+ *
+ * Purpose:     Like H5FD_write_selection(), but takes hid_t arrays
+ *              instead of H5S_t * arrays for the dataspaces.
+ *
+ * Return:      Success:    SUCCEED
+ *                          All writes have completed successfully.
+ *
+ *              Failure:    FAIL
+ *                          One or more writes failed.
+ *
+ * Programmer:  NAF -- 5/19/21
+ *
+ * Changes:     None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_write_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[],
+                        hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], const void *bufs[])
+{
+    hbool_t  offsets_cooked = FALSE;
+    H5S_t *  mem_spaces_local[H5FD_LOCAL_SEL_ARR_LEN];
+    H5S_t ** mem_spaces = mem_spaces_local;
+    H5S_t *  file_spaces_local[H5FD_LOCAL_SEL_ARR_LEN];
+    H5S_t ** file_spaces = file_spaces_local;
+    hid_t    dxpl_id     = H5I_INVALID_HID; /* DXPL for operation */
+    uint32_t i;
+    herr_t   ret_value = SUCCEED; /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(file);
+    HDassert(file->cls);
+    HDassert((mem_space_ids) || (count == 0));
+    HDassert((file_space_ids) || (count == 0));
+    HDassert((offsets) || (count == 0));
+    HDassert((element_sizes) || (count == 0));
+    HDassert((bufs) || (count == 0));
+
+    /* Verify that the first elements of the element_sizes and bufs arrays are
+     * valid. */
+    HDassert((count == 0) || (element_sizes[0] != 0));
+    HDassert((count == 0) || (bufs[0] != NULL));
+
+    /* Get proper DXPL for I/O */
+    dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+    /* The no-op case
+     *
+     * Do not return early for Parallel mode since the I/O could be a
+     * collective transfer.
+     */
+    if (0 == count) {
+        HGOTO_DONE(SUCCEED)
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    if (file->base_addr > 0) {
+
+        /* apply the base_addr offset to the offsets array.  Must undo before
+         * we return.
+         */
+        for (i = 0; i < count; i++) {
+
+            offsets[i] += file->base_addr;
+        }
+        offsets_cooked = TRUE;
+    }
+
+    /* For now at least, only check that the offset is not past the eoa, since
+     * looking into the highest offset in the selection (different from the
+     * bounds) is potentially expensive.
+     */
+    {
+        haddr_t eoa;
+
+        if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type)))
+            HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed")
+
+        for (i = 0; i < count; i++) {
+
+            if ((offsets[i]) > eoa)
+
+                HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu",
+                            (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa)
+        }
+    }
+
+    /* if the underlying VFD supports selection write, make the call */
+    if (file->cls->write_selection) {
+        if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+                                         element_sizes, bufs) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed")
+    }
+    else {
+        /* Otherwise, implement the selection write as a sequence of regular
+         * or vector write calls.
+         */
+
+        /* Allocate arrays of space objects if necessary, otherwise use local
+         * buffers */
+        if (count > sizeof(mem_spaces_local) / sizeof(mem_spaces_local[0])) {
+            if (NULL == (mem_spaces = H5MM_malloc(count * sizeof(H5S_t *))))
+                HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+            if (NULL == (file_spaces = H5MM_malloc(count * sizeof(H5S_t *))))
+                HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list")
+        }
+
+        /* Get object pointers for all dataspaces.  Note this function returns
+         * herr_t, so failures here must return FAIL, not H5I_INVALID_HID. */
+        for (i = 0; i < count; i++) {
+            if (NULL == (mem_spaces[i] = (H5S_t *)H5I_object_verify(mem_space_ids[i], H5I_DATASPACE)))
+                HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, FAIL, "can't retrieve memory dataspace from ID")
+            if (NULL == (file_spaces[i] = (H5S_t *)H5I_object_verify(file_space_ids[i], H5I_DATASPACE)))
+                HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, FAIL, "can't retrieve file dataspace from ID")
+        }
+
+        /* Translate to vector or scalar I/O */
+        if (H5FD__write_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets,
+                                            element_sizes, bufs) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "translation to vector or scalar write failed")
+    }
+
+done:
+    /* undo the base addr offset to the offsets array if necessary */
+    if (offsets_cooked) {
+
+        HDassert(file->base_addr > 0);
+
+        for (i = 0; i < count; i++) {
+
+            offsets[i] -= file->base_addr;
+        }
+    }
+
+    /* Cleanup dataspace arrays */
+    if (mem_spaces != mem_spaces_local)
+        mem_spaces = H5MM_xfree(mem_spaces);
+    if (file_spaces != file_spaces_local)
+        file_spaces = H5MM_xfree(file_spaces);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_write_selection_id() */
+
+/*-------------------------------------------------------------------------
* Function: H5FD_set_eoa
*
* Purpose: Private version of H5FDset_eoa()
@@ -399,6 +2133,246 @@ H5FD_driver_query(const H5FD_class_t *driver, unsigned long *flags /*out*/)
} /* end H5FD_driver_query() */
/*-------------------------------------------------------------------------
+ * Function: H5FD_sort_vector_io_req
+ *
+ * Purpose: Determine whether the supplied vector I/O request is
+ * sorted.
+ *
+ * If it is, set *vector_was_sorted to TRUE, set:
+ *
+ * *s_types_ptr = types
+ * *s_addrs_ptr = addrs
+ * *s_sizes_ptr = sizes
+ * *s_bufs_ptr = bufs
+ *
+ * and return.
+ *
+ * If it is not sorted, duplicate the type, addrs, sizes,
+ * and bufs vectors, storing the base addresses of the new
+ * vectors in *s_types_ptr, *s_addrs_ptr, *s_sizes_ptr, and
+ * *s_bufs_ptr respectively. Determine the sorted order
+ * of the vector I/O request, and load it into the new
+ * vectors in sorted order.
+ *
+ * Note that in this case, it is the caller's responsibility
+ * to free the sorted vectors.
+ *
+ * JRM -- 3/15/21
+ *
+ * Return: SUCCEED/FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static int
+H5FD__vsrt_tmp_cmp(const void *element_1, const void *element_2)
+{
+    haddr_t lhs_addr  = ((const H5FD_vsrt_tmp_t *)element_1)->addr;
+    haddr_t rhs_addr  = ((const H5FD_vsrt_tmp_t *)element_2)->addr;
+    int     ret_value = 0; /* Return value -- 0 when the addresses are equal */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Both entries must carry valid file addresses */
+    HDassert(H5F_addr_defined(lhs_addr));
+    HDassert(H5F_addr_defined(rhs_addr));
+
+    /* qsort comparator contract: negative / zero / positive for
+     * less-than / equal / greater-than, by file address */
+    if (H5F_addr_lt(lhs_addr, rhs_addr))
+        ret_value = -1;
+    else if (H5F_addr_gt(lhs_addr, rhs_addr))
+        ret_value = 1;
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD__vsrt_tmp_cmp() */
+
+herr_t
+H5FD_sort_vector_io_req(hbool_t *vector_was_sorted, uint32_t _count, H5FD_mem_t types[], haddr_t addrs[],
+                        size_t sizes[], H5_flexible_const_ptr_t bufs[], H5FD_mem_t **s_types_ptr,
+                        haddr_t **s_addrs_ptr, size_t **s_sizes_ptr, H5_flexible_const_ptr_t **s_bufs_ptr)
+{
+    herr_t                  ret_value = SUCCEED; /* Return value */
+    size_t                  count     = (size_t)_count;
+    size_t                  i;
+    struct H5FD_vsrt_tmp_t *srt_tmp = NULL;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+
+    HDassert(vector_was_sorted);
+
+    HDassert((types) || (count == 0));
+    HDassert((addrs) || (count == 0));
+    HDassert((sizes) || (count == 0));
+    HDassert((bufs) || (count == 0));
+
+    /* verify that the first elements of the sizes and types arrays are
+     * valid.
+     */
+    HDassert((count == 0) || (sizes[0] != 0));
+    HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+    HDassert((count == 0) || ((s_types_ptr) && (NULL == *s_types_ptr)));
+    HDassert((count == 0) || ((s_addrs_ptr) && (NULL == *s_addrs_ptr)));
+    HDassert((count == 0) || ((s_sizes_ptr) && (NULL == *s_sizes_ptr)));
+    HDassert((count == 0) || ((s_bufs_ptr) && (NULL == *s_bufs_ptr)));
+
+    /* scan the addrs array to see if it is sorted */
+    for (i = 1; i < count; i++) {
+        HDassert(H5F_addr_defined(addrs[i - 1]));
+
+        if (H5F_addr_gt(addrs[i - 1], addrs[i]))
+            break;
+        else if (H5F_addr_eq(addrs[i - 1], addrs[i]))
+            HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "duplicate addr in vector")
+    }
+
+    /* if we traversed the entire array without breaking out, then
+     * the array was already sorted */
+    if (i >= count)
+        *vector_was_sorted = TRUE;
+    else
+        *vector_was_sorted = FALSE;
+
+    if (*vector_was_sorted) {
+
+        *s_types_ptr = types;
+        *s_addrs_ptr = addrs;
+        *s_sizes_ptr = sizes;
+        *s_bufs_ptr  = bufs;
+    }
+    else {
+
+        /* must sort the addrs array in increasing addr order, while
+         * maintaining the association between each addr, and the
+         * sizes[], types[], and bufs[] values at the same index.
+         *
+         * Do this by allocating an array of struct H5FD_vsrt_tmp_t, where
+         * each instance of H5FD_vsrt_tmp_t has two fields, addr and index.
+         * Load the array with the contents of the addrs array and
+         * the index of the associated entry.  Sort the array, allocate
+         * the s_types_ptr, s_addrs_ptr, s_sizes_ptr, and s_bufs_ptr
+         * arrays and populate them using the mapping provided by
+         * the sorted array of H5FD_vsrt_tmp_t.
+         */
+        int    j;
+        size_t fixed_size_index = count;
+        size_t fixed_type_index = count;
+        size_t srt_tmp_size;
+
+        srt_tmp_size = (count * sizeof(struct H5FD_vsrt_tmp_t));
+
+        if (NULL == (srt_tmp = (H5FD_vsrt_tmp_t *)HDmalloc(srt_tmp_size)))
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc srt_tmp")
+
+        for (i = 0; i < count; i++) {
+            /* index must survive the round trip through int (qsort payload) */
+            HDassert(i == (size_t)((int)i));
+
+            srt_tmp[i].addr  = addrs[i];
+            srt_tmp[i].index = (int)i;
+        }
+
+        /* sort the srt_tmp array */
+        HDqsort(srt_tmp, count, sizeof(struct H5FD_vsrt_tmp_t), H5FD__vsrt_tmp_cmp);
+
+        /* Verify no duplicate entries.  This must examine the *sorted*
+         * srt_tmp array, not the original addrs array -- duplicates that
+         * were not adjacent in the caller's order are only adjacent after
+         * the sort.  The assert allows equality so the error path below
+         * remains reachable in debug builds.
+         */
+        for (i = 1; i < count; i++) {
+            HDassert(H5F_addr_le(srt_tmp[i - 1].addr, srt_tmp[i].addr));
+
+            if (H5F_addr_eq(srt_tmp[i - 1].addr, srt_tmp[i].addr))
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "duplicate addr in vector")
+        }
+
+        if ((NULL == (*s_types_ptr = (H5FD_mem_t *)HDmalloc(count * sizeof(H5FD_mem_t)))) ||
+            (NULL == (*s_addrs_ptr = (haddr_t *)HDmalloc(count * sizeof(haddr_t)))) ||
+            (NULL == (*s_sizes_ptr = (size_t *)HDmalloc(count * sizeof(size_t)))) ||
+            (NULL ==
+             (*s_bufs_ptr = (H5_flexible_const_ptr_t *)HDmalloc(count * sizeof(H5_flexible_const_ptr_t))))) {
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sorted vector(s)")
+        }
+
+        HDassert(sizes[0] != 0);
+        HDassert(types[0] != H5FD_MEM_NOLIST);
+
+        /* Scan the sizes and types vectors to determine if the fixed size / type
+         * optimization is in use, and if so, to determine the index of the last
+         * valid value on each vector.  We have already verified that the first
+         * elements of these arrays are valid so we can start at the second
+         * element (if it exists).
+         */
+        for (i = 1; i < count && ((fixed_size_index == count) || (fixed_type_index == count)); i++) {
+            if ((fixed_size_index == count) && (sizes[i] == 0))
+                fixed_size_index = i - 1;
+            if ((fixed_type_index == count) && (types[i] == H5FD_MEM_NOLIST))
+                fixed_type_index = i - 1;
+        }
+
+        HDassert(fixed_size_index <= count);
+        HDassert(fixed_type_index <= count);
+
+        /* populate the sorted vectors */
+        for (i = 0; i < count; i++) {
+
+            j = srt_tmp[i].index;
+
+            (*s_types_ptr)[j] = types[MIN(i, fixed_type_index)];
+            (*s_addrs_ptr)[j] = addrs[i];
+            (*s_sizes_ptr)[j] = sizes[MIN(i, fixed_size_index)];
+            (*s_bufs_ptr)[j]  = bufs[i];
+        }
+    }
+
+done:
+    if (srt_tmp) {
+
+        HDfree(srt_tmp);
+        srt_tmp = NULL;
+    }
+
+    /* On failure, free the sorted vectors if they were allocated.
+     * Note that we only allocate these vectors if the original array
+     * was not sorted -- thus we check both for failure, and for
+     * the flag indicating that the original vector was not sorted
+     * in increasing address order.
+     */
+    if ((ret_value != SUCCEED) && (!(*vector_was_sorted))) {
+
+        /* free space allocated for sorted vectors */
+        if (*s_types_ptr) {
+
+            HDfree(*s_types_ptr);
+            *s_types_ptr = NULL;
+        }
+
+        if (*s_addrs_ptr) {
+
+            HDfree(*s_addrs_ptr);
+            *s_addrs_ptr = NULL;
+        }
+
+        if (*s_sizes_ptr) {
+
+            HDfree(*s_sizes_ptr);
+            *s_sizes_ptr = NULL;
+        }
+
+        if (*s_bufs_ptr) {
+
+            HDfree(*s_bufs_ptr);
+            *s_bufs_ptr = NULL;
+        }
+    }
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD_sort_vector_io_req() */
+
+/*-------------------------------------------------------------------------
* Function: H5FD_delete
*
* Purpose: Private version of H5FDdelete()
@@ -418,6 +2392,7 @@ H5FD_delete(const char *filename, hid_t fapl_id)
FUNC_ENTER_NOAPI(FAIL)
/* Sanity checks */
+
HDassert(filename);
/* Get file access property list */
diff --git a/src/H5FDlog.c b/src/H5FDlog.c
index af34682..4d2e705 100644
--- a/src/H5FDlog.c
+++ b/src/H5FDlog.c
@@ -180,41 +180,46 @@ static herr_t H5FD__log_unlock(H5FD_t *_file);
static herr_t H5FD__log_delete(const char *filename, hid_t fapl_id);
static const H5FD_class_t H5FD_log_g = {
- H5FD_LOG_VALUE, /* value */
- "log", /* name */
- MAXADDR, /* maxaddr */
- H5F_CLOSE_WEAK, /* fc_degree */
- H5FD__log_term, /* terminate */
- NULL, /* sb_size */
- NULL, /* sb_encode */
- NULL, /* sb_decode */
- sizeof(H5FD_log_fapl_t), /* fapl_size */
- H5FD__log_fapl_get, /* fapl_get */
- H5FD__log_fapl_copy, /* fapl_copy */
- H5FD__log_fapl_free, /* fapl_free */
- 0, /* dxpl_size */
- NULL, /* dxpl_copy */
- NULL, /* dxpl_free */
- H5FD__log_open, /* open */
- H5FD__log_close, /* close */
- H5FD__log_cmp, /* cmp */
- H5FD__log_query, /* query */
- NULL, /* get_type_map */
- H5FD__log_alloc, /* alloc */
- H5FD__log_free, /* free */
- H5FD__log_get_eoa, /* get_eoa */
- H5FD__log_set_eoa, /* set_eoa */
- H5FD__log_get_eof, /* get_eof */
- H5FD__log_get_handle, /* get_handle */
- H5FD__log_read, /* read */
- H5FD__log_write, /* write */
- NULL, /* flush */
- H5FD__log_truncate, /* truncate */
- H5FD__log_lock, /* lock */
- H5FD__log_unlock, /* unlock */
- H5FD__log_delete, /* del */
- NULL, /* ctl */
- H5FD_FLMAP_DICHOTOMY /* fl_map */
+ H5FD_CLASS_VERSION, /* struct version */
+ H5FD_LOG_VALUE, /* value */
+ "log", /* name */
+ MAXADDR, /* maxaddr */
+ H5F_CLOSE_WEAK, /* fc_degree */
+ H5FD__log_term, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ sizeof(H5FD_log_fapl_t), /* fapl_size */
+ H5FD__log_fapl_get, /* fapl_get */
+ H5FD__log_fapl_copy, /* fapl_copy */
+ H5FD__log_fapl_free, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ H5FD__log_open, /* open */
+ H5FD__log_close, /* close */
+ H5FD__log_cmp, /* cmp */
+ H5FD__log_query, /* query */
+ NULL, /* get_type_map */
+ H5FD__log_alloc, /* alloc */
+ H5FD__log_free, /* free */
+ H5FD__log_get_eoa, /* get_eoa */
+ H5FD__log_set_eoa, /* set_eoa */
+ H5FD__log_get_eof, /* get_eof */
+ H5FD__log_get_handle, /* get_handle */
+ H5FD__log_read, /* read */
+ H5FD__log_write, /* write */
+ NULL, /* read vector */
+ NULL, /* write vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
+ NULL, /* flush */
+ H5FD__log_truncate, /* truncate */
+ H5FD__log_lock, /* lock */
+ H5FD__log_unlock, /* unlock */
+ H5FD__log_delete, /* del */
+ NULL, /* ctl */
+ H5FD_FLMAP_DICHOTOMY /* fl_map */
};
/* Default configuration, if none provided */
diff --git a/src/H5FDmirror.c b/src/H5FDmirror.c
index 84c744c..0ab5345 100644
--- a/src/H5FDmirror.c
+++ b/src/H5FDmirror.c
@@ -160,6 +160,7 @@ static herr_t H5FD__mirror_unlock(H5FD_t *_file);
static herr_t H5FD__mirror_verify_reply(H5FD_mirror_t *file);
static const H5FD_class_t H5FD_mirror_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_MIRROR_VALUE, /* value */
"mirror", /* name */
MAXADDR, /* maxaddr */
@@ -188,6 +189,10 @@ static const H5FD_class_t H5FD_mirror_g = {
NULL, /* get_handle */
H5FD__mirror_read, /* read */
H5FD__mirror_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
H5FD__mirror_truncate, /* truncate */
H5FD__mirror_lock, /* lock */
@@ -225,9 +230,11 @@ H5FD_mirror_init(void)
LOG_OP_CALL(__func__);
- if (H5I_VFL != H5I_get_type(H5FD_MIRROR_g))
+ if (H5I_VFL != H5I_get_type(H5FD_MIRROR_g)) {
H5FD_MIRROR_g = H5FD_register(&H5FD_mirror_g, sizeof(H5FD_class_t), FALSE);
-
+ if (H5I_INVALID_HID == H5FD_MIRROR_g)
+ HGOTO_ERROR(H5E_ID, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register mirror");
+ }
ret_value = H5FD_MIRROR_g;
done:
diff --git a/src/H5FDmirror_priv.h b/src/H5FDmirror_priv.h
index 6a7b13e..f647c21 100644
--- a/src/H5FDmirror_priv.h
+++ b/src/H5FDmirror_priv.h
@@ -28,10 +28,10 @@ extern "C" {
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
*/
-/* The maximum allowed size for a receiving buffer when accepting bytes to
+/* Define the maximum allowed size for a receiving buffer when accepting bytes to
* write. Writes larger than this size are performed by multiple accept-write
* steps by the Writer. */
-#define H5FD_MIRROR_DATA_BUFFER_MAX H5_GB /* 1 Gigabyte */
+#define H5FD_MIRROR_DATA_BUFFER_MAX (1024 * 1024 * 1024) /* 1 Gigabyte */
#define H5FD_MIRROR_XMIT_CURR_VERSION 1
#define H5FD_MIRROR_XMIT_MAGIC 0x87F8005B
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index c72578d..2a5e462 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -87,49 +87,66 @@ static herr_t H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, ha
void *buf);
static herr_t H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
const void *buf);
+static herr_t H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count,
+ H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], void *bufs[]);
+static herr_t H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count,
+ H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ const void *bufs[]);
static herr_t H5FD__mpio_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD__mpio_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static herr_t H5FD__mpio_delete(const char *filename, hid_t fapl_id);
static herr_t H5FD__mpio_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void *input,
void **output);
+/* Other functions */
+static herr_t H5FD__mpio_vector_build_types(
+ uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], H5_flexible_const_ptr_t bufs[],
+ haddr_t *s_addrs[], size_t *s_sizes[], H5_flexible_const_ptr_t *s_bufs[], hbool_t *vector_was_sorted,
+ MPI_Offset *mpi_off, H5_flexible_const_ptr_t *mpi_bufs_base, int *size_i, MPI_Datatype *buf_type,
+ hbool_t *buf_type_created, MPI_Datatype *file_type, hbool_t *file_type_created, char *unused);
+
/* The MPIO file driver information */
static const H5FD_class_t H5FD_mpio_g = {
- H5_VFD_MPIO, /* value */
- "mpio", /* name */
- HADDR_MAX, /* maxaddr */
- H5F_CLOSE_SEMI, /* fc_degree */
- H5FD__mpio_term, /* terminate */
- NULL, /* sb_size */
- NULL, /* sb_encode */
- NULL, /* sb_decode */
- 0, /* fapl_size */
- NULL, /* fapl_get */
- NULL, /* fapl_copy */
- NULL, /* fapl_free */
- 0, /* dxpl_size */
- NULL, /* dxpl_copy */
- NULL, /* dxpl_free */
- H5FD__mpio_open, /* open */
- H5FD__mpio_close, /* close */
- NULL, /* cmp */
- H5FD__mpio_query, /* query */
- NULL, /* get_type_map */
- NULL, /* alloc */
- NULL, /* free */
- H5FD__mpio_get_eoa, /* get_eoa */
- H5FD__mpio_set_eoa, /* set_eoa */
- H5FD__mpio_get_eof, /* get_eof */
- H5FD__mpio_get_handle, /* get_handle */
- H5FD__mpio_read, /* read */
- H5FD__mpio_write, /* write */
- H5FD__mpio_flush, /* flush */
- H5FD__mpio_truncate, /* truncate */
- NULL, /* lock */
- NULL, /* unlock */
- H5FD__mpio_delete, /* del */
- H5FD__mpio_ctl, /* ctl */
- H5FD_FLMAP_DICHOTOMY /* fl_map */
+ H5FD_CLASS_VERSION, /* struct version */
+ H5_VFD_MPIO, /* value */
+ "mpio", /* name */
+ HADDR_MAX, /* maxaddr */
+ H5F_CLOSE_SEMI, /* fc_degree */
+ H5FD__mpio_term, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ 0, /* fapl_size */
+ NULL, /* fapl_get */
+ NULL, /* fapl_copy */
+ NULL, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ H5FD__mpio_open, /* open */
+ H5FD__mpio_close, /* close */
+ NULL, /* cmp */
+ H5FD__mpio_query, /* query */
+ NULL, /* get_type_map */
+ NULL, /* alloc */
+ NULL, /* free */
+ H5FD__mpio_get_eoa, /* get_eoa */
+ H5FD__mpio_set_eoa, /* set_eoa */
+ H5FD__mpio_get_eof, /* get_eof */
+ H5FD__mpio_get_handle, /* get_handle */
+ H5FD__mpio_read, /* read */
+ H5FD__mpio_write, /* write */
+ H5FD__mpio_read_vector, /* read_vector */
+ H5FD__mpio_write_vector, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
+ H5FD__mpio_flush, /* flush */
+ H5FD__mpio_truncate, /* truncate */
+ NULL, /* lock */
+ NULL, /* unlock */
+ H5FD__mpio_delete, /* del */
+ H5FD__mpio_ctl, /* ctl */
+ H5FD_FLMAP_DICHOTOMY /* fl_map */
};
#ifdef H5FDmpio_DEBUG
@@ -188,6 +205,41 @@ H5FD__mpio_parse_debug_str(const char *s)
FUNC_LEAVE_NOAPI_VOID
} /* end H5FD__mpio_parse_debug_str() */
+
+/*---------------------------------------------------------------------------
+ * Function: H5FD__mem_t_to_str
+ *
+ * Purpose: Returns a string representing the enum value in an H5FD_mem_t
+ * enum
+ *
+ * Returns: H5FD_mem_t enum value string
+ *
+ *---------------------------------------------------------------------------
+ */
+static const char *
+H5FD__mem_t_to_str(H5FD_mem_t mem_type)
+{
+ switch (mem_type) {
+ case H5FD_MEM_NOLIST:
+ return "H5FD_MEM_NOLIST";
+ case H5FD_MEM_DEFAULT:
+ return "H5FD_MEM_DEFAULT";
+ case H5FD_MEM_SUPER:
+ return "H5FD_MEM_SUPER";
+ case H5FD_MEM_BTREE:
+ return "H5FD_MEM_BTREE";
+ case H5FD_MEM_DRAW:
+ return "H5FD_MEM_DRAW";
+ case H5FD_MEM_GHEAP:
+ return "H5FD_MEM_GHEAP";
+ case H5FD_MEM_LHEAP:
+ return "H5FD_MEM_LHEAP";
+ case H5FD_MEM_OHDR:
+ return "H5FD_MEM_OHDR";
+ default:
+ return "(Unknown)";
+ }
+}
#endif /* H5FDmpio_DEBUG */
/*-------------------------------------------------------------------------
@@ -864,14 +916,19 @@ H5FD__mpio_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t H5_ATTR
file->mpi_size = mpi_size;
/* Only processor p0 will get the filesize and broadcast it. */
- if (mpi_rank == 0)
+ if (mpi_rank == 0) {
+ /* If MPI_File_get_size fails, broadcast file size as -1 to signal error */
if (MPI_SUCCESS != (mpi_code = MPI_File_get_size(fh, &file_size)))
- HMPI_GOTO_ERROR(NULL, "MPI_File_get_size failed", mpi_code)
+ file_size = (MPI_Offset)-1;
+ }
/* Broadcast file size */
if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&file_size, (int)sizeof(MPI_Offset), MPI_BYTE, 0, comm)))
HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
+ if (file_size < 0)
+ HMPI_GOTO_ERROR(NULL, "MPI_File_get_size failed", mpi_code)
+
/* Determine if the file should be truncated */
if (file_size && (flags & H5F_ACC_TRUNC)) {
/* Truncate the file */
@@ -989,7 +1046,6 @@ H5FD__mpio_query(const H5FD_t H5_ATTR_UNUSED *_file, unsigned long *flags /* out
*flags |= H5FD_FEAT_AGGREGATE_METADATA; /* OK to aggregate metadata allocations */
*flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
*flags |= H5FD_FEAT_HAS_MPI; /* This driver uses MPI */
- *flags |= H5FD_FEAT_ALLOCATE_EARLY; /* Allocate space early instead of late */
*flags |= H5FD_FEAT_DEFAULT_VFD_COMPATIBLE; /* VFD creates a file which can be opened with the default
VFD */
} /* end if */
@@ -1155,7 +1211,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
MPI_Status mpi_stat; /* Status from I/O operation */
MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
int size_i; /* Integer copy of 'size' to read */
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
MPI_Count bytes_read = 0; /* Number of bytes read in */
MPI_Count type_size; /* MPI datatype used for I/O's size */
MPI_Count io_size; /* Actual number of bytes requested */
@@ -1167,6 +1223,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
int n;
#endif
hbool_t use_view_this_time = FALSE;
+ hbool_t derived_type = FALSE;
hbool_t rank0_bcast = FALSE; /* If read-with-rank0-and-bcast flag was used */
#ifdef H5FDmpio_DEBUG
hbool_t H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file));
@@ -1194,8 +1251,6 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
if (H5FD_mpi_haddr_to_MPIOff(addr, &mpi_off /*out*/) < 0)
HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
size_i = (int)size;
- if ((hsize_t)size_i != size)
- HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from size to size_i")
/* Only look for MPI views for raw data transfers */
if (type == H5FD_MEM_DRAW) {
@@ -1262,10 +1317,14 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
rank0_bcast = TRUE;
/* Read on rank 0 Bcast to other ranks */
- if (file->mpi_rank == 0)
+ if (file->mpi_rank == 0) {
+ /* If MPI_File_read_at fails, push an error, but continue
+ * to participate in following MPI_Bcast */
if (MPI_SUCCESS !=
(mpi_code = MPI_File_read_at(file->f, mpi_off, buf, size_i, buf_type, &mpi_stat)))
- HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code)
+ HMPI_DONE_ERROR(FAIL, "MPI_File_read_at failed", mpi_code)
+ }
+
if (MPI_SUCCESS != (mpi_code = MPI_Bcast(buf, size_i, buf_type, 0, file->comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
} /* end if */
@@ -1295,6 +1354,21 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
} /* end if */
else {
+ if (size != (hsize_t)size_i) {
+ /* If HERE, then we need to work around the integer size limit
+ * of 2GB. The input size_t size variable cannot fit into an integer,
+ * but we can get around that limitation by creating a different datatype
+ * and then setting the integer size (or element count) to 1 when using
+ * the derived_type.
+ */
+
+ if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+
+ derived_type = TRUE;
+ size_i = 1;
+ }
+
#ifdef H5FDmpio_DEBUG
if (H5FD_mpio_debug_r_flag)
HDfprintf(stderr, "%s: (%d) doing MPI independent IO\n", __func__, file->mpi_rank);
@@ -1308,12 +1382,22 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
/* Only retrieve bytes read if this rank _actually_ participated in I/O */
if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) {
/* How many bytes were actually read? */
-#if MPI_VERSION >= 3
- if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read)))
+#if H5_CHECK_MPI_VERSION(3, 0)
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read))) {
#else
- if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read)))
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) {
#endif
- HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+ if (rank0_bcast && file->mpi_rank == 0) {
+ /* If MPI_Get_elements(_x) fails for a rank 0 bcast strategy,
+ * push an error, but continue to participate in the following
+ * MPI_Bcast.
+ */
+ bytes_read = -1;
+ HMPI_DONE_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+ }
+ else
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+ }
} /* end if */
/* If the rank0-bcast feature was used, broadcast the # of bytes read to
@@ -1323,7 +1407,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
* of the data. (QAK - 2019/1/2)
*/
if (rank0_bcast)
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm))
#else
if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm))
@@ -1331,7 +1415,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0)
/* Get the type's size */
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size)))
#else
if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size)))
@@ -1347,8 +1431,8 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
#ifdef H5FDmpio_DEBUG
if (H5FD_mpio_debug_r_flag)
- HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_read = %lld\n", __func__, file->mpi_rank,
- (long)mpi_off, bytes_read);
+ HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_read = %lld type = %s\n", __func__, file->mpi_rank,
+ (long)mpi_off, (long long)bytes_read, H5FD__mem_t_to_str(type));
#endif
/*
@@ -1358,6 +1442,9 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU
HDmemset((char *)buf + bytes_read, 0, (size_t)n);
done:
+ if (derived_type)
+ MPI_Type_free(&buf_type);
+
#ifdef H5FDmpio_DEBUG
if (H5FD_mpio_debug_t_flag)
HDfprintf(stderr, "%s: (%d) Leaving\n", __func__, file->mpi_rank);
@@ -1395,7 +1482,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
MPI_Offset mpi_off;
MPI_Status mpi_stat; /* Status from I/O operation */
MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
MPI_Count bytes_written;
MPI_Count type_size; /* MPI datatype used for I/O's size */
MPI_Count io_size; /* Actual number of bytes requested */
@@ -1470,20 +1557,6 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
*/
mpi_off = 0;
} /* end if */
- else if (size != (hsize_t)size_i) {
- /* If HERE, then we need to work around the integer size limit
- * of 2GB. The input size_t size variable cannot fit into an integer,
- * but we can get around that limitation by creating a different datatype
- * and then setting the integer size (or element count) to 1 when using
- * the derived_type.
- */
-
- if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0)
- HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
-
- derived_type = TRUE;
- size_i = 1;
- }
/* Write the data. */
if (use_view_this_time) {
@@ -1529,6 +1602,21 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
} /* end if */
else {
+ if (size != (hsize_t)size_i) {
+ /* If HERE, then we need to work around the integer size limit
+ * of 2GB. The input size_t size variable cannot fit into an integer,
+ * but we can get around that limitation by creating a different datatype
+ * and then setting the integer size (or element count) to 1 when using
+ * the derived_type.
+ */
+
+ if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+
+ derived_type = TRUE;
+ size_i = 1;
+ }
+
#ifdef H5FDmpio_DEBUG
if (H5FD_mpio_debug_w_flag)
HDfprintf(stderr, "%s: (%d) doing MPI independent IO\n", __func__, file->mpi_rank);
@@ -1540,7 +1628,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
} /* end else */
/* How many bytes were actually written? */
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_written)))
#else
if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_written)))
@@ -1548,7 +1636,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
/* Get the type's size */
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size)))
#else
if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size)))
@@ -1564,8 +1652,8 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h
#ifdef H5FDmpio_DEBUG
if (H5FD_mpio_debug_w_flag)
- HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_written = %lld\n", __func__, file->mpi_rank,
- (long)mpi_off, bytes_written);
+ HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_written = %lld type = %s\n", __func__,
+ file->mpi_rank, (long)mpi_off, (long long)bytes_written, H5FD__mem_t_to_str(type));
#endif
/* Each process will keep track of its perceived EOF value locally, and
@@ -1592,6 +1680,1050 @@ done:
} /* end H5FD__mpio_write() */
/*-------------------------------------------------------------------------
+ * Function: H5FD__mpio_vector_build_types
+ *
+ * Purpose: Build MPI datatypes and calculate offset, base buffer, and
+ * size for MPIO vector I/O. Spun off from common code in
+ * H5FD__mpio_read_vector() and H5FD__mpio_write_vector().
+ *
+ * Return: Success: SUCCEED.
+ * Failure: FAIL.
+ *
+ * Programmer: Neil Fortner
+ * March 14, 2022
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__mpio_vector_build_types(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ H5_flexible_const_ptr_t bufs[], haddr_t *s_addrs[], size_t *s_sizes[],
+ H5_flexible_const_ptr_t *s_bufs[], hbool_t *vector_was_sorted,
+ MPI_Offset *mpi_off, H5_flexible_const_ptr_t *mpi_bufs_base, int *size_i,
+ MPI_Datatype *buf_type, hbool_t *buf_type_created, MPI_Datatype *file_type,
+ hbool_t *file_type_created, char *unused)
+{
+ hsize_t bigio_count; /* Transition point to create derived type */
+ hbool_t fixed_size = FALSE;
+ size_t size;
+ H5FD_mem_t * s_types = NULL;
+ int * mpi_block_lengths = NULL;
+ MPI_Aint mpi_bufs_base_Aint;
+ MPI_Aint * mpi_bufs = NULL;
+ MPI_Aint * mpi_displacements = NULL;
+ MPI_Datatype *sub_types = NULL;
+ uint8_t * sub_types_created = NULL;
+ int i;
+ int j;
+ int mpi_code; /* MPI return code */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(s_sizes);
+ HDassert(s_bufs);
+ HDassert(vector_was_sorted);
+ HDassert(*vector_was_sorted);
+ HDassert(mpi_off);
+ HDassert(mpi_bufs_base);
+ HDassert(size_i);
+ HDassert(buf_type);
+ HDassert(buf_type_created);
+ HDassert(!*buf_type_created);
+ HDassert(file_type);
+ HDassert(file_type_created);
+ HDassert(!*file_type_created);
+ HDassert(unused);
+
+ /* Get big I/O transition point (may be lower than 2G for testing) */
+ bigio_count = H5_mpi_get_bigio_count();
+
+ if (count == 1) {
+ /* Single block. Just use a series of MPI_BYTEs for the file view.
+ */
+ *size_i = (int)sizes[0];
+ *buf_type = MPI_BYTE;
+ *file_type = MPI_BYTE;
+ *mpi_bufs_base = bufs[0];
+
+ /* Setup s_addrs, s_sizes and s_bufs (needed for incomplete read filling code and eof
+ * calculation code) */
+ *s_addrs = addrs;
+ *s_sizes = sizes;
+ *s_bufs = bufs;
+
+ /* some numeric conversions */
+ if (H5FD_mpi_haddr_to_MPIOff(addrs[0], mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI offset")
+
+ /* Check for size overflow */
+ if (sizes[0] > bigio_count) {
+ /* We need to work around the integer size limit of 2GB. The input size_t size
+ * variable cannot fit into an integer, but we can get around that limitation by
+ * creating a different datatype and then setting the integer size (or element
+ * count) to 1 when using the derived_type. */
+
+ if (H5_mpio_create_large_type(sizes[0], 0, MPI_BYTE, buf_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+ *buf_type_created = TRUE;
+
+ if (H5_mpio_create_large_type(sizes[0], 0, MPI_BYTE, file_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+ *file_type_created = TRUE;
+
+ *size_i = 1;
+ }
+ }
+ else if (count > 0) { /* create MPI derived types describing the vector write */
+
+ /* sort the vector I/O request into increasing address order if required
+ *
+ * If the vector is already sorted, the base addresses of types, addrs, sizes,
+ * and bufs will be returned in s_types, s_addrs, s_sizes, and s_bufs respectively.
+ *
+ * If the vector was not already sorted, new, sorted versions of types, addrs, sizes, and bufs
+ * are allocated, populated, and returned in s_types, s_addrs, s_sizes, and s_bufs respectively.
+ * In this case, this function must free the memory allocated for the sorted vectors.
+ */
+ if (H5FD_sort_vector_io_req(vector_was_sorted, count, types, addrs, sizes, bufs, &s_types, s_addrs,
+ s_sizes, s_bufs) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't sort vector I/O request")
+
+ if ((NULL == (mpi_block_lengths = (int *)HDmalloc((size_t)count * sizeof(int)))) ||
+ (NULL == (mpi_displacements = (MPI_Aint *)HDmalloc((size_t)count * sizeof(MPI_Aint)))) ||
+ (NULL == (mpi_bufs = (MPI_Aint *)HDmalloc((size_t)count * sizeof(MPI_Aint))))) {
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc mpi block lengths / displacement")
+ }
+
+ /* when we setup mpi_bufs[] below, all addresses are offsets from
+ * mpi_bufs_base.
+ *
+ * Since these offsets must all be positive, we must scan through
+ * s_bufs[] to find the smallest value, and choose that for
+ * mpi_bufs_base.
+ */
+
+ j = 0; /* guess at the index of the smallest value of s_bufs[] */
+
+ for (i = 1; i < (int)count; i++) {
+
+ if ((*s_bufs)[i].cvp < (*s_bufs)[j].cvp) {
+
+ j = i;
+ }
+ }
+
+ *mpi_bufs_base = (*s_bufs)[j];
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address(mpi_bufs_base->cvp, &mpi_bufs_base_Aint)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address for s_bufs[] to mpi_bufs_base failed", mpi_code)
+
+ *size_i = 1;
+
+ fixed_size = FALSE;
+
+ /* load the mpi_block_lengths and mpi_displacements arrays */
+ for (i = 0; i < (int)count; i++) {
+ /* Determine size of this vector element */
+ if (!fixed_size) {
+ if ((*s_sizes)[i] == 0) {
+ HDassert(vector_was_sorted);
+ fixed_size = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+ size = (*s_sizes)[i];
+ }
+ }
+
+ /* Add to block lengths and displacements arrays */
+ mpi_block_lengths[i] = (int)size;
+ mpi_displacements[i] = (MPI_Aint)(*s_addrs)[i];
+
+ /* convert s_bufs[i] to MPI_Aint... */
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_address((*s_bufs)[i].cvp, &(mpi_bufs[i]))))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_address for s_bufs[] - mpi_bufs_base failed", mpi_code)
+
+ /*... and then subtract mpi_bufs_base_Aint from it. */
+#if ((MPI_VERSION > 3) || ((MPI_VERSION == 3) && (MPI_SUBVERSION >= 1)))
+ mpi_bufs[i] = MPI_Aint_diff(mpi_bufs[i], mpi_bufs_base_Aint);
+#else
+ mpi_bufs[i] = mpi_bufs[i] - mpi_bufs_base_Aint;
+#endif
+
+ /* Check for size overflow */
+ if (size > bigio_count) {
+ /* We need to work around the integer size limit of 2GB. The input size_t size
+ * variable cannot fit into an integer, but we can get around that limitation by
+ * creating a different datatype and then setting the integer size (or element
+ * count) to 1 when using the derived_type. */
+
+ /* Allocate arrays to keep track of types and whether they were created, if
+ * necessary */
+ if (!sub_types) {
+ HDassert(!sub_types_created);
+
+ if (NULL == (sub_types = (int *)HDmalloc((size_t)count * sizeof(MPI_Datatype))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sub types array")
+ if (NULL == (sub_types_created = (uint8_t *)HDcalloc((size_t)count, 1))) {
+ H5MM_free(sub_types);
+ sub_types = NULL;
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sub types created array")
+ }
+
+ /* Initialize sub_types to all MPI_BYTE */
+ for (j = 0; j < (int)count; j++)
+ sub_types[j] = MPI_BYTE;
+ }
+ HDassert(sub_types_created);
+
+ /* Create type for large block */
+ if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &sub_types[i]) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+ sub_types_created[i] = TRUE;
+
+ /* Only one of these large types for this vector element */
+ mpi_block_lengths[i] = 1;
+ }
+ else
+ HDassert(size == (size_t)mpi_block_lengths[i]);
+ }
+
+ /* create the memory MPI derived types */
+ if (sub_types) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)count, mpi_block_lengths, mpi_bufs,
+ sub_types, buf_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct for buf_type failed", mpi_code)
+ }
+ else if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)count, mpi_block_lengths, mpi_bufs,
+ MPI_BYTE, buf_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed for buf_type failed", mpi_code)
+
+ *buf_type_created = TRUE;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(buf_type)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit for buf_type failed", mpi_code)
+
+ /* create the file MPI derived type */
+ if (sub_types) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)count, mpi_block_lengths,
+ mpi_displacements, sub_types, file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct for file_type failed", mpi_code)
+ }
+ else if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)count, mpi_block_lengths,
+ mpi_displacements, MPI_BYTE, file_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed for file_type failed", mpi_code)
+
+ *file_type_created = TRUE;
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(file_type)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit for file_type failed", mpi_code)
+
+ /* Free up memory used to build types */
+ HDassert(mpi_block_lengths);
+ HDfree(mpi_block_lengths);
+ mpi_block_lengths = NULL;
+
+ HDassert(mpi_displacements);
+ HDfree(mpi_displacements);
+ mpi_displacements = NULL;
+
+ HDassert(mpi_bufs);
+ HDfree(mpi_bufs);
+ mpi_bufs = NULL;
+
+ if (sub_types) {
+ HDassert(sub_types);
+
+ for (i = 0; i < (int)count; i++)
+ if (sub_types_created[i])
+ MPI_Type_free(&sub_types[i]);
+
+ HDfree(sub_types);
+ sub_types = NULL;
+ HDfree(sub_types_created);
+ sub_types_created = NULL;
+ }
+
+ /* some numeric conversions */
+ if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+ }
+ else {
+ /* setup for null participation in the collective operation. */
+ *buf_type = MPI_BYTE;
+ *file_type = MPI_BYTE;
+
+ /* Set non-NULL pointer for I/O operation */
+ mpi_bufs_base->vp = unused;
+
+ /* MPI count to read */
+ *size_i = 0;
+
+ /* some numeric conversions */
+ if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+ }
+
+done:
+ /* free sorted vectors if they exist */
+ if (!vector_was_sorted)
+ if (s_types) {
+ HDfree(s_types);
+ s_types = NULL;
+ }
+
+ /* Clean up on error */
+ if (ret_value < 0) {
+ if (mpi_block_lengths) {
+ HDfree(mpi_block_lengths);
+ mpi_block_lengths = NULL;
+ }
+
+ if (mpi_displacements) {
+ HDfree(mpi_displacements);
+ mpi_displacements = NULL;
+ }
+
+ if (mpi_bufs) {
+ HDfree(mpi_bufs);
+ mpi_bufs = NULL;
+ }
+
+ if (sub_types) {
+ HDassert(sub_types_created);
+
+ for (i = 0; i < (int)count; i++)
+ if (sub_types_created[i])
+ MPI_Type_free(&sub_types[i]);
+
+ HDfree(sub_types);
+ sub_types = NULL;
+ HDfree(sub_types_created);
+ sub_types_created = NULL;
+ }
+ }
+
+ /* Make sure we cleaned up */
+ HDassert(!mpi_block_lengths);
+ HDassert(!mpi_displacements);
+ HDassert(!mpi_bufs);
+ HDassert(!sub_types);
+ HDassert(!sub_types_created);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD__mpio_vector_build_types() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD__mpio_read_vector()
+ *
+ * Purpose: The behaviour of this function depends on the value of
+ * the io_xfer_mode obtained from the context.
+ *
+ * If it is H5FD_MPIO_COLLECTIVE, this is a collective
+ * operation, which allows us to use MPI_File_set_view, and
+ * then perform the entire vector read in a single MPI call.
+ *
+ * Do this (if count is positive), by constructing memory
+ * and file derived types from the supplied vector, using
+ * file type to set the file view, and then reading the
+ * memory type from file. Note that this read is
+ * either independent or collective depending on the
+ * value of mpio_coll_opt -- again obtained from the context.
+ *
+ * If count is zero, participate in the collective read
+ * (if so configured) with an empty read.
+ *
+ * Finally, set the file view back to its default state.
+ *
+ * In contrast, if io_xfer_mode is H5FD_MPIO_INDEPENDENT,
+ * this call is independent, and thus we cannot use
+ * MPI_File_set_view().
+ *
+ * In this case, simply walk the vector, and issue an
+ * independent read for each entry.
+ *
+ * Return: Success: SUCCEED.
+ * Failure: FAIL.
+ *
+ * Programmer: John Mainzer
+ * March 15, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, H5FD_mem_t types[],
+ haddr_t addrs[], size_t sizes[], void *bufs[])
+{
+ H5FD_mpio_t * file = (H5FD_mpio_t *)_file;
+ hbool_t vector_was_sorted = TRUE;
+ haddr_t * s_addrs = NULL;
+ size_t * s_sizes = NULL;
+ void ** s_bufs = NULL;
+ char unused = 0; /* Unused, except for non-NULL pointer value */
+ void * mpi_bufs_base = NULL;
+ MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
+ hbool_t buf_type_created = FALSE;
+ MPI_Datatype file_type = MPI_BYTE; /* MPI description of the selection in file */
+ hbool_t file_type_created = FALSE;
+ int i;
+ int mpi_code; /* MPI return code */
+ MPI_Offset mpi_off = 0;
+ MPI_Status mpi_stat; /* Status from I/O operation */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */
+ H5FD_mpio_collective_opt_t coll_opt_mode; /* whether we are doing collective or independent I/O */
+ int size_i;
+#if MPI_VERSION >= 3
+ MPI_Count bytes_read = 0; /* Number of bytes read in */
+ MPI_Count type_size; /* MPI datatype used for I/O's size */
+ MPI_Count io_size; /* Actual number of bytes requested */
+ MPI_Count n;
+#else
+ int bytes_read = 0; /* Number of bytes read in */
+ int type_size; /* MPI datatype used for I/O's size */
+ int io_size; /* Actual number of bytes requested */
+ int n;
+#endif
+ hbool_t rank0_bcast = FALSE; /* If read-with-rank0-and-bcast flag was used */
+#ifdef H5FDmpio_DEBUG
+ hbool_t H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file));
+ hbool_t H5FD_mpio_debug_r_flag = (H5FD_mpio_debug_flags_s[(int)'r'] && H5FD_MPIO_TRACE_THIS_RANK(file));
+#endif
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_t_flag)
+ HDfprintf(stderr, "%s: (%d) Entering\n", __func__, file->mpi_rank);
+#endif
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(H5FD_MPIO == file->pub.driver_id);
+ HDassert((types) || (count == 0));
+ HDassert((addrs) || (count == 0));
+ HDassert((sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* verify that the first elements of the sizes and types arrays are
+ * valid.
+ */
+ HDassert((count == 0) || (sizes[0] != 0));
+ HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+ /* Get the transfer mode from the API context
+ *
+ * This flag is set to H5FD_MPIO_COLLECTIVE if the API call is
+ * collective, and to H5FD_MPIO_INDEPENDENT if it is not.
+ *
+ * While this doesn't mean that we are actually about to do a collective
+ * read, it does mean that all ranks are here, so we can use MPI_File_set_view().
+ */
+ if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
+
+ if (xfer_mode == H5FD_MPIO_COLLECTIVE) {
+ /* Build MPI types, etc. */
+ if (H5FD__mpio_vector_build_types(count, types, addrs, sizes, (H5_flexible_const_ptr_t *)bufs,
+ &s_addrs, &s_sizes, (H5_flexible_const_ptr_t **)&s_bufs,
+ &vector_was_sorted, &mpi_off,
+ (H5_flexible_const_ptr_t *)&mpi_bufs_base, &size_i, &buf_type,
+ &buf_type_created, &file_type, &file_type_created, &unused) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't build MPI datatypes for I/O")
+
+ /* free sorted addrs vector if it exists */
+ if (!vector_was_sorted)
+ if (s_addrs) {
+ HDfree(s_addrs);
+ s_addrs = NULL;
+ }
+
+ /* Portably initialize MPI status variable */
+ HDmemset(&mpi_stat, 0, sizeof(mpi_stat));
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: mpi_off = %ld size_i = %d\n", __func__, (long)mpi_off, size_i);
+#endif
+
+ /* Setup the file view. */
+ if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, mpi_off, MPI_BYTE, file_type,
+ H5FD_mpi_native_g, file->info)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+
+ /* Reset mpi_off to 0 since the view now starts at the data offset */
+ if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, &mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+
+ /* Get the collective_opt property to check whether the application wants to do IO individually.
+ */
+ if (H5CX_get_mpio_coll_opt(&coll_opt_mode) < 0)
+
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property")
+
+ /* Read the data. */
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: using MPIO collective mode\n", __func__);
+#endif
+ if (coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO) {
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: doing MPI collective IO\n", __func__);
+#endif
+ /* Check whether we should read from rank 0 and broadcast to other ranks */
+ if (H5CX_get_mpio_rank0_bcast()) {
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: doing read-rank0-and-MPI_Bcast\n", __func__);
+#endif
+ /* Indicate path we've taken */
+ rank0_bcast = TRUE;
+
+ /* Read on rank 0 Bcast to other ranks */
+ if (file->mpi_rank == 0)
+ if (MPI_SUCCESS != (mpi_code = MPI_File_read_at(file->f, mpi_off, mpi_bufs_base, size_i,
+ buf_type, &mpi_stat)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code)
+ if (MPI_SUCCESS != (mpi_code = MPI_Bcast(mpi_bufs_base, size_i, buf_type, 0, file->comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+ } /* end if */
+ else if (MPI_SUCCESS != (mpi_code = MPI_File_read_at_all(file->f, mpi_off, mpi_bufs_base, size_i,
+ buf_type, &mpi_stat)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code)
+ } /* end if */
+ else if (size_i > 0) {
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_File_read_at(file->f, mpi_off, mpi_bufs_base, size_i, buf_type, &mpi_stat)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code)
+
+ } /* end else */
+
+ /* Reset the file view */
+ if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, (MPI_Offset)0, MPI_BYTE, MPI_BYTE,
+ H5FD_mpi_native_g, file->info)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+
+ /* Only retrieve bytes read if this rank _actually_ participated in I/O */
+ if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) {
+ /* How many bytes were actually read? */
+#if MPI_VERSION >= 3
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read)))
+#else
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read)))
+#endif
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+ } /* end if */
+
+ /* If the rank0-bcast feature was used, broadcast the # of bytes read to
+ * other ranks, which didn't perform any I/O.
+ */
+ /* NOTE: This could be optimized further to be combined with the broadcast
+ * of the data. (QAK - 2019/1/2)
+ * Or have rank 0 clear the unread parts of the buffer prior to
+ * the bcast. (NAF - 2021/9/15)
+ */
+ if (rank0_bcast)
+#if MPI_VERSION >= 3
+ if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm))
+#else
+ if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm))
+#endif
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0)
+
+ /* Get the type's size */
+#if MPI_VERSION >= 3
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size)))
+#else
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size)))
+#endif
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code)
+
+ /* Compute the actual number of bytes requested */
+ io_size = type_size * size_i;
+
+ /* Check for read failure */
+ if (bytes_read < 0 || bytes_read > io_size)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed")
+
+ /* Check for incomplete read */
+ n = io_size - bytes_read;
+ if (n > 0) {
+ i = (int)count - 1;
+
+ /* Iterate over sorted array in reverse, filling in zeroes to
+ * sections of the buffers that were not read to */
+ do {
+ HDassert(i >= 0);
+
+#if MPI_VERSION >= 3
+ io_size = MIN(n, (MPI_Count)s_sizes[i]);
+ bytes_read = (MPI_Count)s_sizes[i] - io_size;
+#else
+ io_size = MIN(n, (int)s_sizes[i]);
+ bytes_read = (int)s_sizes[i] - io_size;
+#endif
+ HDassert(bytes_read >= 0);
+
+ HDmemset((char *)s_bufs[i] + bytes_read, 0, (size_t)io_size);
+
+ n -= io_size;
+ i--;
+ } while (n > 0);
+ }
+ }
+ else if (count > 0) {
+ haddr_t max_addr = HADDR_MAX;
+ hbool_t fixed_size = FALSE;
+ size_t size;
+
+ /* The read is part of an independent operation. As a result,
+ * we can't use MPI_File_set_view() (since it it a collective operation),
+ * and thus we can't use the above code to construct the MPI datatypes.
+ * In the future, we could write code to detect when a contiguous slab
+ * in the file selection spans multiple vector elements and construct a
+ * memory datatype to match this larger block in the file, but for now
+ * just read in each element of the vector in a separate
+ * MPI_File_read_at() call.
+ *
+ * We could also just detect the case when the entire file selection is
+ * contiguous, which would allow us to use
+ * H5FD__mpio_vector_build_types() to construct the memory datatype.
+ */
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_r_flag)
+ HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+ /* Loop over vector elements */
+ for (i = 0; i < (int)count; i++) {
+ /* Convert address to mpi offset */
+ if (H5FD_mpi_haddr_to_MPIOff(addrs[i], &mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
+
+ /* Calculate I/O size */
+ if (!fixed_size) {
+ if (sizes[i] == 0) {
+ fixed_size = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+ size = sizes[i];
+ }
+ }
+ size_i = (int)size;
+
+ if (size != (size_t)size_i) {
+ /* If HERE, then we need to work around the integer size limit
+ * of 2GB. The input size_t size variable cannot fit into an integer,
+ * but we can get around that limitation by creating a different datatype
+ * and then setting the integer size (or element count) to 1 when using
+ * the derived_type.
+ */
+
+ if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+
+ buf_type_created = TRUE;
+ size_i = 1;
+ }
+
+ /* Check if we actually need to do I/O */
+ if (addrs[i] < max_addr) {
+ /* Portably initialize MPI status variable */
+ HDmemset(&mpi_stat, 0, sizeof(mpi_stat));
+
+ /* Issue read */
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_File_read_at(file->f, mpi_off, bufs[i], size_i, buf_type, &mpi_stat)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code)
+
+ /* How many bytes were actually read? */
+#if MPI_VERSION >= 3
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, MPI_BYTE, &bytes_read)))
+#else
+ if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read)))
+#endif
+ HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
+
+ /* Compute the actual number of bytes requested */
+#if MPI_VERSION >= 3
+ io_size = (MPI_Count)size;
+#else
+ io_size = (int)size;
+#endif
+
+ /* Check for read failure */
+ if (bytes_read < 0 || bytes_read > io_size)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed")
+
+ /*
+ * If we didn't read the entire I/O, fill in zeroes beyond end of
+ * the physical MPI file and don't issue any more reads at higher
+ * addresses.
+ */
+ if ((n = (io_size - bytes_read)) > 0) {
+ HDmemset((char *)bufs[i] + bytes_read, 0, (size_t)n);
+ max_addr = addrs[i] + (haddr_t)bytes_read;
+ }
+ }
+ else {
+ /* Read is past the max address, fill in zeroes */
+ HDmemset((char *)bufs[i], 0, size);
+ }
+ }
+ }
+
+done:
+ if (buf_type_created) {
+ MPI_Type_free(&buf_type);
+ }
+
+ if (file_type_created) {
+ MPI_Type_free(&file_type);
+ }
+
+ /* free sorted vectors if they exist */
+ if (!vector_was_sorted) {
+ if (s_addrs) {
+ HDfree(s_addrs);
+ s_addrs = NULL;
+ }
+ if (s_sizes) {
+ HDfree(s_sizes);
+ s_sizes = NULL;
+ }
+ if (s_bufs) {
+ HDfree(s_bufs);
+ s_bufs = NULL;
+ }
+ }
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_t_flag)
+ HDfprintf(stdout, "%s: Leaving, proc %d: ret_value = %d\n", __func__, file->mpi_rank, ret_value);
+#endif
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD__mpio_read_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD__mpio_write_vector
+ *
+ * Purpose: The behaviour of this function depends on the value of
+ * the io_xfer_mode obtained from the context.
+ *
+ * If it is H5FD_MPIO_COLLECTIVE, this is a collective
+ * operation, which allows us to use MPI_File_set_view, and
+ * then perform the entire vector write in a single MPI call.
+ *
+ * Do this (if count is positive), by constructing memory
+ * and file derived types from the supplied vector, using
+ * file type to set the file view, and then writing the
+ * memory type to file. Note that this write is
+ * either independent or collective depending on the
+ * value of mpio_coll_opt -- again obtained from the context.
+ *
+ * If count is zero, participate in the collective write
+ * (if so configured) with an empty write.
+ *
+ * Finally, set the file view back to its default state.
+ *
+ * In contrast, if io_xfer_mode is H5FD_MPIO_INDEPENDENT,
+ * this call is independent, and thus we cannot use
+ * MPI_File_set_view().
+ *
+ * In this case, simply walk the vector, and issue an
+ * independent write for each entry.
+ *
+ * Return: Success: SUCCEED.
+ * Failure: FAIL.
+ *
+ * Programmer: John Mainzer
+ * March 15, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, H5FD_mem_t types[],
+ haddr_t addrs[], size_t sizes[], const void *bufs[])
+{
+ H5FD_mpio_t * file = (H5FD_mpio_t *)_file;
+ hbool_t vector_was_sorted = TRUE;
+ haddr_t * s_addrs = NULL;
+ size_t * s_sizes = NULL;
+ const void ** s_bufs = NULL;
+ char unused = 0; /* Unused, except for non-NULL pointer value */
+ const void * mpi_bufs_base = NULL;
+ MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
+ hbool_t buf_type_created = FALSE;
+ MPI_Datatype file_type = MPI_BYTE; /* MPI description of the selection in file */
+ hbool_t file_type_created = FALSE;
+ int i;
+ int mpi_code; /* MPI return code */
+ MPI_Offset mpi_off = 0;
+ MPI_Status mpi_stat; /* Status from I/O operation */
+ H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */
+ H5FD_mpio_collective_opt_t coll_opt_mode; /* whether we are doing collective or independent I/O */
+ int size_i;
+#ifdef H5FDmpio_DEBUG
+ hbool_t H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file));
+ hbool_t H5FD_mpio_debug_w_flag = (H5FD_mpio_debug_flags_s[(int)'w'] && H5FD_MPIO_TRACE_THIS_RANK(file));
+#endif
+ haddr_t max_addr = 0;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_t_flag)
+ HDfprintf(stderr, "%s: (%d) Entering\n", __func__, file->mpi_rank);
+#endif
+
+ /* Sanity checks */
+ HDassert(file);
+ HDassert(H5FD_MPIO == file->pub.driver_id);
+ HDassert((types) || (count == 0));
+ HDassert((addrs) || (count == 0));
+ HDassert((sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* verify that the first elements of the sizes and types arrays are
+ * valid.
+ */
+ HDassert((count == 0) || (sizes[0] != 0));
+ HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+ /* Verify that no data is written while between MPI_Barrier()s during file flush */
+
+ HDassert(!H5CX_get_mpi_file_flushing());
+
+ /* Get the transfer mode from the API context
+ *
+ * This flag is set to H5FD_MPIO_COLLECTIVE if the API call is
+ * collective, and to H5FD_MPIO_INDEPENDENT if it is not.
+ *
+ * While this doesn't mean that we are actually about to do a collective
+ * write, it does mean that all ranks are here, so we can use MPI_File_set_view().
+ */
+ if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
+
+ if (xfer_mode == H5FD_MPIO_COLLECTIVE) {
+ /* Build MPI types, etc. */
+ if (H5FD__mpio_vector_build_types(count, types, addrs, sizes, (H5_flexible_const_ptr_t *)bufs,
+ &s_addrs, &s_sizes, (H5_flexible_const_ptr_t **)&s_bufs,
+ &vector_was_sorted, &mpi_off,
+ (H5_flexible_const_ptr_t *)&mpi_bufs_base, &size_i, &buf_type,
+ &buf_type_created, &file_type, &file_type_created, &unused) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't build MPI datatypes for I/O")
+
+ /* Compute max addr written to */
+ if (count > 0)
+ max_addr = s_addrs[count - 1] + (haddr_t)(s_sizes[count - 1]);
+
+ /* free sorted vectors if they exist */
+ if (!vector_was_sorted) {
+ if (s_addrs) {
+ HDfree(s_addrs);
+ s_addrs = NULL;
+ }
+ if (s_sizes) {
+ HDfree(s_sizes);
+ s_sizes = NULL;
+ }
+ if (s_bufs) {
+ HDfree(s_bufs);
+ s_bufs = NULL;
+ }
+ }
+
+ /* Portably initialize MPI status variable */
+ HDmemset(&mpi_stat, 0, sizeof(MPI_Status));
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_w_flag)
+ HDfprintf(stdout, "%s: mpi_off = %ld size_i = %d\n", __func__, (long)mpi_off, size_i);
+#endif
+
+ /* Setup the file view. */
+ if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, mpi_off, MPI_BYTE, file_type,
+ H5FD_mpi_native_g, file->info)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+
+ /* Reset mpi_off to 0 since the view now starts at the data offset */
+ if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, &mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+
+ /* Get the collective_opt property to check whether the application wants to do IO individually.
+ */
+ if (H5CX_get_mpio_coll_opt(&coll_opt_mode) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property")
+
+ /* Write the data. */
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_w_flag)
+ HDfprintf(stdout, "%s: using MPIO collective mode\n", __func__);
+#endif
+
+ if (coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO) {
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_w_flag)
+ HDfprintf(stdout, "%s: doing MPI collective IO\n", __func__);
+#endif
+
+ if (MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(file->f, mpi_off, mpi_bufs_base, size_i,
+ buf_type, &mpi_stat)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code)
+ } /* end if */
+ else if (size_i > 0) {
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_w_flag)
+ HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_File_write_at(file->f, mpi_off, mpi_bufs_base, size_i, buf_type, &mpi_stat)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code)
+ } /* end else */
+
+ /* Reset the file view */
+ if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, (MPI_Offset)0, MPI_BYTE, MPI_BYTE,
+ H5FD_mpi_native_g, file->info)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+ }
+ else if (count > 0) {
+ hbool_t fixed_size = FALSE;
+ size_t size;
+
+ /* The write is part of an independent operation. As a result,
+ * we can't use MPI_File_set_view() (since it is a collective operation),
+ * and thus we can't use the above code to construct the MPI datatypes.
+ * In the future, we could write code to detect when a contiguous slab
+ * in the file selection spans multiple vector elements and construct a
+ * memory datatype to match this larger block in the file, but for now
+ * just write each element of the vector in a separate
+ * MPI_File_write_at() call.
+ *
+ * We could also just detect the case when the entire file selection is
+ * contiguous, which would allow us to use
+ * H5FD__mpio_vector_build_types() to construct the memory datatype.
+ */
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_w_flag)
+ HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+ /* Loop over vector elements */
+ for (i = 0; i < (int)count; i++) {
+ /* Convert address to mpi offset */
+ if (H5FD_mpi_haddr_to_MPIOff(addrs[i], &mpi_off) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
+
+ /* Calculate I/O size */
+ if (!fixed_size) {
+ if (sizes[i] == 0) {
+ fixed_size = TRUE;
+ size = sizes[i - 1];
+ }
+ else {
+ size = sizes[i];
+ }
+ }
+ size_i = (int)size;
+
+ if (size != (size_t)size_i) {
+ /* If HERE, then we need to work around the integer size limit
+ * of 2GB. The input size_t size variable cannot fit into an integer,
+ * but we can get around that limitation by creating a different datatype
+ * and then setting the integer size (or element count) to 1 when using
+ * the derived_type.
+ */
+
+ if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0)
+ HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+
+ buf_type_created = TRUE;
+ size_i = 1;
+ }
+
+ /* Perform write */
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_File_write_at(file->f, mpi_off, bufs[i], size_i, buf_type, &mpi_stat)))
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code)
+
+ /* Check if this is the highest address written to so far */
+ if (addrs[i] + size > max_addr)
+ max_addr = addrs[i] + size;
+ }
+ }
+
+ /* Each process will keep track of its perceived EOF value locally, and
+ * ultimately we will reduce this value to the maximum amongst all
+ * processes, but until then keep the actual eof at HADDR_UNDEF just in
+ * case something bad happens before that point. (rather have a value
+ * we know is wrong sitting around rather than one that could only
+ * potentially be wrong.)
+ */
+ file->eof = HADDR_UNDEF;
+
+ /* check to see if the local eof has been extended, and update if so */
+ if (max_addr > file->local_eof)
+ file->local_eof = max_addr;
+
+done:
+ if (buf_type_created)
+ MPI_Type_free(&buf_type);
+
+ if (file_type_created)
+ MPI_Type_free(&file_type);
+
+ /* Cleanup on error */
+ if (ret_value < 0 && !vector_was_sorted) {
+ if (s_addrs) {
+ HDfree(s_addrs);
+ s_addrs = NULL;
+ }
+ if (s_sizes) {
+ HDfree(s_sizes);
+ s_sizes = NULL;
+ }
+ if (s_bufs) {
+ HDfree(s_bufs);
+ s_bufs = NULL;
+ }
+ }
+
+ /* Make sure we cleaned up */
+ HDassert(vector_was_sorted || !s_addrs);
+ HDassert(vector_was_sorted || !s_sizes);
+ HDassert(vector_was_sorted || !s_bufs);
+
+#ifdef H5FDmpio_DEBUG
+ if (H5FD_mpio_debug_t_flag)
+ HDfprintf(stdout, "%s: Leaving, proc %d: ret_value = %d\n", __func__, file->mpi_rank, ret_value);
+#endif
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD__mpio_write_vector() */
+
+/*-------------------------------------------------------------------------
* Function: H5FD__mpio_flush
*
* Purpose: Makes sure that all data is on disk. This is collective.
@@ -1703,17 +2835,19 @@ H5FD__mpio_truncate(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t H5_ATTR
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
/* Only processor p0 will get the filesize and broadcast it. */
- /* (Note that throwing an error here will cause non-rank 0 processes
- * to hang in following Bcast. -QAK, 3/17/2018)
- */
- if (0 == file->mpi_rank)
+ if (0 == file->mpi_rank) {
+ /* If MPI_File_get_size fails, broadcast file size as -1 to signal error */
if (MPI_SUCCESS != (mpi_code = MPI_File_get_size(file->f, &size)))
- HMPI_GOTO_ERROR(FAIL, "MPI_File_get_size failed", mpi_code)
+ size = (MPI_Offset)-1;
+ }
/* Broadcast file size */
if (MPI_SUCCESS != (mpi_code = MPI_Bcast(&size, (int)sizeof(MPI_Offset), MPI_BYTE, 0, file->comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code)
+ if (size < 0)
+ HMPI_GOTO_ERROR(FAIL, "MPI_File_get_size failed", mpi_code)
+
if (H5FD_mpi_haddr_to_MPIOff(file->eoa, &needed_eof) < 0)
HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "cannot convert from haddr_t to MPI_Offset")
@@ -1796,9 +2930,13 @@ H5FD__mpio_delete(const char *filename, hid_t fapl_id)
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
/* Delete the file */
- if (mpi_rank == 0)
+ if (mpi_rank == 0) {
+ /* If MPI_File_delete fails, push an error but
+ * still participate in the following MPI_Barrier
+ */
if (MPI_SUCCESS != (mpi_code = MPI_File_delete(filename, info)))
- HMPI_GOTO_ERROR(FAIL, "MPI_File_delete failed", mpi_code)
+ HMPI_DONE_ERROR(FAIL, "MPI_File_delete failed", mpi_code)
+ }
/* Set up a barrier (don't want processes to run ahead of the delete) */
if (MPI_SUCCESS != (mpi_code = MPI_Barrier(comm)))
diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h
index 00dea1b..5ce98ca 100644
--- a/src/H5FDmpio.h
+++ b/src/H5FDmpio.h
@@ -223,7 +223,7 @@ H5_DLL herr_t H5Pset_dxpl_mpio_collective_opt(hid_t dxpl_id, H5FD_mpio_collectiv
*
* Use of this function is optional.
*
- * \todo Add missing version information
+ * \since 1.8.0
*
*/
H5_DLL herr_t H5Pset_dxpl_mpio_chunk_opt(hid_t dxpl_id, H5FD_mpio_chunk_opt_t opt_mode);
@@ -247,7 +247,7 @@ H5_DLL herr_t H5Pset_dxpl_mpio_chunk_opt(hid_t dxpl_id, H5FD_mpio_chunk_opt_t op
* otherwise, a separate I/O process will be invoked for each chunk
* (multi-chunk I/O).
*
- * \todo Add missing version information
+ * \since 1.8.0
*
*/
H5_DLL herr_t H5Pset_dxpl_mpio_chunk_opt_num(hid_t dxpl_id, unsigned num_chunk_per_proc);
@@ -272,7 +272,7 @@ H5_DLL herr_t H5Pset_dxpl_mpio_chunk_opt_num(hid_t dxpl_id, unsigned num_chunk_p
* percent_proc_per_chunk, the library will do collective I/O for this
* chunk; otherwise, independent I/O will be done for the chunk.
*
- * \todo Add missing version information
+ * \since 1.8.0
*
*/
H5_DLL herr_t H5Pset_dxpl_mpio_chunk_opt_ratio(hid_t dxpl_id, unsigned percent_num_proc_per_chunk);
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 3dcfa37..20c538f 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -176,6 +176,7 @@ static herr_t H5FD_multi_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, c
/* The class struct */
static const H5FD_class_t H5FD_multi_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5_VFD_MULTI, /* value */
"multi", /* name */
HADDR_MAX, /* maxaddr */
@@ -204,6 +205,10 @@ static const H5FD_class_t H5FD_multi_g = {
H5FD_multi_get_handle, /* get_handle */
H5FD_multi_read, /* read */
H5FD_multi_write, /* write */
+ NULL, /*read_vector */
+ NULL, /*write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
H5FD_multi_flush, /* flush */
H5FD_multi_truncate, /* truncate */
H5FD_multi_lock, /* lock */
@@ -517,7 +522,7 @@ H5FD_split_populate_config(const char *meta_ext, hid_t meta_plist_id, const char
meta_name_g[sizeof(meta_name_g) - 1] = '\0';
}
else
- sprintf(meta_name_g, "%%s%s", meta_ext);
+ snprintf(meta_name_g, sizeof(meta_name_g), "%%s%s", meta_ext);
}
else {
strncpy(meta_name_g, "%s.meta", sizeof(meta_name_g));
@@ -535,7 +540,7 @@ H5FD_split_populate_config(const char *meta_ext, hid_t meta_plist_id, const char
raw_name_g[sizeof(raw_name_g) - 1] = '\0';
}
else
- sprintf(raw_name_g, "%%s%s", raw_ext);
+ snprintf(raw_name_g, sizeof(raw_name_g), "%%s%s", raw_ext);
}
else {
strncpy(raw_name_g, "%s.raw", sizeof(raw_name_g));
@@ -634,7 +639,7 @@ H5FD_multi_populate_config(const H5FD_mem_t *memb_map, const hid_t *memb_fapl, c
if (!memb_name) {
assert(strlen(letters) == H5FD_MEM_NTYPES);
for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt = (H5FD_mem_t)(mt + 1)) {
- sprintf(_memb_name_g[mt], "%%s-%c.h5", letters[mt]);
+ snprintf(_memb_name_g[mt], 16, "%%s-%c.h5", letters[mt]);
_memb_name_ptrs[mt] = _memb_name_g[mt];
}
memb_name = _memb_name_ptrs;
diff --git a/src/H5FDperform.c b/src/H5FDperform.c
index 096fdd6..4a68c6e 100644
--- a/src/H5FDperform.c
+++ b/src/H5FDperform.c
@@ -29,13 +29,14 @@
* Function: H5FDperform_init
*
* Purpose: Ensure that the library is initialized and then call
- * the provided VFD initializer.
+ * the provided VFD initializer
*
- * Return: Success: identifier for the VFD just initialized
+ * Return: Success: Identifier for the VFD just initialized
* Failure: H5I_INVALID_HID
*-------------------------------------------------------------------------
*/
-hid_t H5FDperform_init(hid_t (*init)(void))
+hid_t
+H5FDperform_init(H5FD_init_t op)
{
hid_t ret_value = H5I_INVALID_HID; /* Return value */
@@ -43,16 +44,16 @@ hid_t H5FDperform_init(hid_t (*init)(void))
/*NO TRACE*/
/* It is possible that an application will evaluate an
- * `H5FD_*` symbol (`H5FD_FAMILY`, `H5FD_MULTI`, `H5FD_SEC2`, et
- * cetera) before the library has had an opportunity to initialize.
- * Call H5_init_library() to make sure that the library has been
- * initialized before `init` is run.
+ * `H5FD_*` symbol (`H5FD_FAMILY`, `H5FD_MULTI`, `H5FD_SEC2`, etc.)
+ * before the library has had an opportunity to initialize. Call
+ * H5_init_library() to make sure that the library has been initialized
+ * before `init` is run.
*/
- if (H5_init_library() < 0) {
+ if (H5_init_library() < 0)
HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, H5I_INVALID_HID, "library initialization failed")
- }
- ret_value = init();
+ ret_value = op();
+
done:
FUNC_LEAVE_API_NOINIT(ret_value)
}
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 61b4c60..bcbc693 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -24,6 +24,7 @@
/* Private headers needed by this file */
#include "H5Pprivate.h" /* Property lists */
+#include "H5Sprivate.h" /* Dataspaces */
/*
* The MPI drivers are needed because there are
@@ -94,6 +95,9 @@ typedef enum H5FD_get_driver_kind_t {
H5FD_GET_DRIVER_BY_VALUE /* Value field is set */
} H5FD_get_driver_kind_t;
+/* Forward declarations for prototype arguments */
+struct H5S_t;
+
/*****************************/
/* Library Private Variables */
/*****************************/
@@ -140,6 +144,22 @@ H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags)
H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map);
H5_DLL herr_t H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/);
H5_DLL herr_t H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf);
+H5_DLL herr_t H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], void *bufs[] /* out */);
+H5_DLL herr_t H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[],
+ size_t sizes[], const void *bufs[] /* out */);
+H5_DLL herr_t H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, struct H5S_t **mem_spaces,
+ struct H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[],
+ void *bufs[] /* out */);
+H5_DLL herr_t H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, struct H5S_t **mem_spaces,
+ struct H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[],
+ const void *bufs[]);
+H5_DLL herr_t H5FD_read_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[],
+ hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[],
+ void *bufs[] /* out */);
+H5_DLL herr_t H5FD_write_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[],
+ hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[],
+ const void *bufs[]);
H5_DLL herr_t H5FD_flush(H5FD_t *file, hbool_t closing);
H5_DLL herr_t H5FD_truncate(H5FD_t *file, hbool_t closing);
H5_DLL herr_t H5FD_lock(H5FD_t *file, hbool_t rw);
@@ -152,6 +172,10 @@ H5_DLL herr_t H5FD_set_base_addr(H5FD_t *file, haddr_t base_addr);
H5_DLL haddr_t H5FD_get_base_addr(const H5FD_t *file);
H5_DLL herr_t H5FD_set_paged_aggr(H5FD_t *file, hbool_t paged);
+H5_DLL herr_t H5FD_sort_vector_io_req(hbool_t *vector_was_sorted, uint32_t count, H5FD_mem_t types[],
+ haddr_t addrs[], size_t sizes[], H5_flexible_const_ptr_t bufs[],
+ H5FD_mem_t **s_types_ptr, haddr_t **s_addrs_ptr, size_t **s_sizes_ptr,
+ H5_flexible_const_ptr_t **s_bufs_ptr);
H5_DLL herr_t H5FD_init(void);
/* Function prototypes for MPI based VFDs*/
diff --git a/src/H5FDros3.c b/src/H5FDros3.c
index df06526..fcce76d 100644
--- a/src/H5FDros3.c
+++ b/src/H5FDros3.c
@@ -237,6 +237,7 @@ static herr_t H5FD__ros3_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing
static herr_t H5FD__ros3_validate_config(const H5FD_ros3_fapl_t *fa);
static const H5FD_class_t H5FD_ros3_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_ROS3_VALUE, /* value */
"ros3", /* name */
MAXADDR, /* maxaddr */
@@ -265,6 +266,10 @@ static const H5FD_class_t H5FD_ros3_g = {
H5FD__ros3_get_handle, /* get_handle */
H5FD__ros3_read, /* read */
H5FD__ros3_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
H5FD__ros3_truncate, /* truncate */
NULL, /* lock */
@@ -304,8 +309,12 @@ H5FD_ros3_init(void)
HDfprintf(stdout, "H5FD_ros3_init() called.\n");
#endif
- if (H5I_VFL != H5I_get_type(H5FD_ROS3_g))
+ if (H5I_VFL != H5I_get_type(H5FD_ROS3_g)) {
H5FD_ROS3_g = H5FD_register(&H5FD_ros3_g, sizeof(H5FD_class_t), FALSE);
+ if (H5I_INVALID_HID == H5FD_ROS3_g) {
+ HGOTO_ERROR(H5E_ID, H5E_CANTREGISTER, H5I_INVALID_HID, "unable to register ros3");
+ }
+ }
#if ROS3_STATS
/* pre-compute statsbin boundaries
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index 46f5fd4..cc417070 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -143,6 +143,7 @@ static herr_t H5FD__sec2_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, c
void **output);
static const H5FD_class_t H5FD_sec2_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_SEC2_VALUE, /* value */
"sec2", /* name */
MAXADDR, /* maxaddr */
@@ -171,6 +172,10 @@ static const H5FD_class_t H5FD_sec2_g = {
H5FD__sec2_get_handle, /* get_handle */
H5FD__sec2_read, /* read */
H5FD__sec2_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
H5FD__sec2_truncate, /* truncate */
H5FD__sec2_lock, /* lock */
diff --git a/src/H5FDspace.c b/src/H5FDspace.c
index de52dc3..48b06ba 100644
--- a/src/H5FDspace.c
+++ b/src/H5FDspace.c
@@ -148,7 +148,7 @@ H5FD__alloc_real(H5FD_t *file, H5FD_mem_t type, hsize_t size, haddr_t *frag_addr
FUNC_ENTER_PACKAGE
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: type = %u, size = %Hu\n", __func__, (unsigned)type, size);
+ HDfprintf(stderr, "%s: type = %u, size = %" PRIuHSIZE "\n", __func__, (unsigned)type, size);
#endif /* H5FD_ALLOC_DEBUG */
/* check args */
@@ -211,7 +211,7 @@ H5FD__alloc_real(H5FD_t *file, H5FD_mem_t type, hsize_t size, haddr_t *frag_addr
done:
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: ret_value = %a\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: ret_value = %" PRIuHADDR "\n", __func__, ret_value);
#endif /* H5FD_ALLOC_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FD__alloc_real() */
@@ -287,7 +287,8 @@ H5FD__free_real(H5FD_t *file, H5FD_mem_t type, haddr_t addr, hsize_t size)
HDassert(size > 0);
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: type = %u, addr = %a, size = %Hu\n", __func__, (unsigned)type, addr, size);
+ HDfprintf(stderr, "%s: type = %u, addr = %" PRIuHADDR ", size = %" PRIuHSIZE "\n", __func__,
+ (unsigned)type, addr, size);
#endif /* H5FD_ALLOC_DEBUG */
/* Sanity checking */
@@ -317,11 +318,11 @@ H5FD__free_real(H5FD_t *file, H5FD_mem_t type, haddr_t addr, hsize_t size)
eoa = file->cls->get_eoa(file, type);
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: eoa = %a\n", __func__, eoa);
+ HDfprintf(stderr, "%s: eoa = %" PRIuHADDR "\n", __func__, eoa);
#endif /* H5FD_ALLOC_DEBUG */
if (eoa == (addr + size)) {
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Reducing file size to = %a\n", __func__, addr);
+ HDfprintf(stderr, "%s: Reducing file size to = %" PRIuHADDR "\n", __func__, addr);
#endif /* H5FD_ALLOC_DEBUG */
if (file->cls->set_eoa(file, type, addr) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTSET, FAIL, "set end of space allocation request failed")
@@ -330,8 +331,8 @@ H5FD__free_real(H5FD_t *file, H5FD_mem_t type, haddr_t addr, hsize_t size)
else {
/* leak memory */
#ifdef H5FD_ALLOC_DEBUG
- HDfprintf(stderr, "%s: LEAKED MEMORY!!! type = %u, addr = %a, size = %Hu\n", __func__, (unsigned)type,
- addr, size);
+ HDfprintf(stderr, "%s: LEAKED MEMORY!!! type = %u, addr = %" PRIuHADDR ", size = %" PRIuHSIZE "\n",
+ __func__, (unsigned)type, addr, size);
#endif /* H5FD_ALLOC_DEBUG */
} /* end else */
diff --git a/src/H5FDsplitter.c b/src/H5FDsplitter.c
index 31438cd..124c54f 100644
--- a/src/H5FDsplitter.c
+++ b/src/H5FDsplitter.c
@@ -138,6 +138,7 @@ static herr_t H5FD__splitter_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flag
void **output);
static const H5FD_class_t H5FD_splitter_g = {
+ H5FD_CLASS_VERSION, /* struct version */
H5FD_SPLITTER_VALUE, /* value */
"splitter", /* name */
MAXADDR, /* maxaddr */
@@ -166,6 +167,10 @@ static const H5FD_class_t H5FD_splitter_g = {
H5FD__splitter_get_handle, /* get_handle */
H5FD__splitter_read, /* read */
H5FD__splitter_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
H5FD__splitter_flush, /* flush */
H5FD__splitter_truncate, /* truncate */
H5FD__splitter_lock, /* lock */
@@ -527,20 +532,20 @@ H5FD__splitter_get_default_wo_path(char *new_path, size_t new_path_len, const ch
HGOTO_ERROR(H5E_VFL, H5E_CANTSET, FAIL, "filename exceeds max length")
/* Determine if filename contains a ".h5" extension. */
- if ((file_extension = strstr(base_filename, ".h5"))) {
+ if ((file_extension = HDstrstr(base_filename, ".h5"))) {
/* Insert the suffix between the filename and ".h5" extension. */
HDstrcpy(new_path, base_filename);
- file_extension = strstr(new_path, ".h5");
+ file_extension = HDstrstr(new_path, ".h5");
HDsprintf(file_extension, "%s%s", suffix, ".h5");
}
- else if ((file_extension = strrchr(base_filename, '.'))) {
+ else if ((file_extension = HDstrrchr(base_filename, '.'))) {
char *new_extension_loc = NULL;
/* If the filename doesn't contain a ".h5" extension, but contains
* AN extension, just insert the suffix before that extension.
*/
HDstrcpy(new_path, base_filename);
- new_extension_loc = strrchr(new_path, '.');
+ new_extension_loc = HDstrrchr(new_path, '.');
HDsprintf(new_extension_loc, "%s%s", suffix, file_extension);
}
else {
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index 122379a..6624685 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -183,41 +183,46 @@ static herr_t H5FD_stdio_unlock(H5FD_t *_file);
static herr_t H5FD_stdio_delete(const char *filename, hid_t fapl_id);
static const H5FD_class_t H5FD_stdio_g = {
- H5_VFD_STDIO, /* value */
- "stdio", /* name */
- MAXADDR, /* maxaddr */
- H5F_CLOSE_WEAK, /* fc_degree */
- H5FD_stdio_term, /* terminate */
- NULL, /* sb_size */
- NULL, /* sb_encode */
- NULL, /* sb_decode */
- 0, /* fapl_size */
- NULL, /* fapl_get */
- NULL, /* fapl_copy */
- NULL, /* fapl_free */
- 0, /* dxpl_size */
- NULL, /* dxpl_copy */
- NULL, /* dxpl_free */
- H5FD_stdio_open, /* open */
- H5FD_stdio_close, /* close */
- H5FD_stdio_cmp, /* cmp */
- H5FD_stdio_query, /* query */
- NULL, /* get_type_map */
- H5FD_stdio_alloc, /* alloc */
- NULL, /* free */
- H5FD_stdio_get_eoa, /* get_eoa */
- H5FD_stdio_set_eoa, /* set_eoa */
- H5FD_stdio_get_eof, /* get_eof */
- H5FD_stdio_get_handle, /* get_handle */
- H5FD_stdio_read, /* read */
- H5FD_stdio_write, /* write */
- H5FD_stdio_flush, /* flush */
- H5FD_stdio_truncate, /* truncate */
- H5FD_stdio_lock, /* lock */
- H5FD_stdio_unlock, /* unlock */
- H5FD_stdio_delete, /* del */
- NULL, /* ctl */
- H5FD_FLMAP_DICHOTOMY /* fl_map */
+ H5FD_CLASS_VERSION, /* struct version */
+ H5_VFD_STDIO, /* value */
+ "stdio", /* name */
+ MAXADDR, /* maxaddr */
+ H5F_CLOSE_WEAK, /* fc_degree */
+ H5FD_stdio_term, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ 0, /* fapl_size */
+ NULL, /* fapl_get */
+ NULL, /* fapl_copy */
+ NULL, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ H5FD_stdio_open, /* open */
+ H5FD_stdio_close, /* close */
+ H5FD_stdio_cmp, /* cmp */
+ H5FD_stdio_query, /* query */
+ NULL, /* get_type_map */
+ H5FD_stdio_alloc, /* alloc */
+ NULL, /* free */
+ H5FD_stdio_get_eoa, /* get_eoa */
+ H5FD_stdio_set_eoa, /* set_eoa */
+ H5FD_stdio_get_eof, /* get_eof */
+ H5FD_stdio_get_handle, /* get_handle */
+ H5FD_stdio_read, /* read */
+ H5FD_stdio_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
+ H5FD_stdio_flush, /* flush */
+ H5FD_stdio_truncate, /* truncate */
+ H5FD_stdio_lock, /* lock */
+ H5FD_stdio_unlock, /* unlock */
+ H5FD_stdio_delete, /* del */
+ NULL, /* ctl */
+ H5FD_FLMAP_DICHOTOMY /* fl_map */
};
/*-------------------------------------------------------------------------
diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h
index 6519551..8e9d0d4 100644
--- a/src/H5FLprivate.h
+++ b/src/H5FLprivate.h
@@ -50,6 +50,11 @@
*/
/* #define H5FL_TRACK */
#ifdef H5FL_TRACK
+
+#ifndef H5_HAVE_CODESTACK
+#error "Free list tracking requires code stack to be enabled"
+#endif
+
/* Macro for inclusion in the free list allocation calls */
#define H5FL_TRACK_INFO , __FILE__, __func__, __LINE__
@@ -273,16 +278,17 @@ typedef struct H5FL_arr_head_t {
#define H5FL_BARR_DEFINE_STATIC(b, t, m) static H5FL_ARR_DEFINE_COMMON(sizeof(b), t, m)
/* Allocate an array of type 't' */
-#define H5FL_ARR_MALLOC(t, elem) H5FL_arr_malloc(&(H5FL_ARR_NAME(t)), elem)
+#define H5FL_ARR_MALLOC(t, elem) H5FL_arr_malloc(&(H5FL_ARR_NAME(t)), elem H5FL_TRACK_INFO)
/* Allocate an array of type 't' and clear it to all zeros */
-#define H5FL_ARR_CALLOC(t, elem) H5FL_arr_calloc(&(H5FL_ARR_NAME(t)), elem)
+#define H5FL_ARR_CALLOC(t, elem) H5FL_arr_calloc(&(H5FL_ARR_NAME(t)), elem H5FL_TRACK_INFO)
/* Free an array of type 't' */
#define H5FL_ARR_FREE(t, obj) (t *)H5FL_arr_free(&(H5FL_ARR_NAME(t)), obj)
/* Re-allocate an array of type 't' */
-#define H5FL_ARR_REALLOC(t, obj, new_elem) H5FL_arr_realloc(&(H5FL_ARR_NAME(t)), obj, new_elem)
+#define H5FL_ARR_REALLOC(t, obj, new_elem) \
+ H5FL_arr_realloc(&(H5FL_ARR_NAME(t)), obj, new_elem H5FL_TRACK_INFO)
#else /* H5_NO_ARR_FREE_LISTS */
/* Common macro for H5FL_ARR_DEFINE & H5FL_ARR_DEFINE_STATIC (and H5FL_BARR variants) */
@@ -405,10 +411,10 @@ H5_DLL void *H5FL_reg_calloc(H5FL_reg_head_t *head H5FL_TRACK_PARAMS);
H5_DLL void *H5FL_reg_free(H5FL_reg_head_t *head, void *obj);
/* Array free lists */
-H5_DLL void *H5FL_arr_malloc(H5FL_arr_head_t *head, size_t elem);
-H5_DLL void *H5FL_arr_calloc(H5FL_arr_head_t *head, size_t elem);
+H5_DLL void *H5FL_arr_malloc(H5FL_arr_head_t *head, size_t elem H5FL_TRACK_PARAMS);
+H5_DLL void *H5FL_arr_calloc(H5FL_arr_head_t *head, size_t elem H5FL_TRACK_PARAMS);
H5_DLL void *H5FL_arr_free(H5FL_arr_head_t *head, void *obj);
-H5_DLL void *H5FL_arr_realloc(H5FL_arr_head_t *head, void *obj, size_t new_elem);
+H5_DLL void *H5FL_arr_realloc(H5FL_arr_head_t *head, void *obj, size_t new_elem H5FL_TRACK_PARAMS);
/* Sequence free lists */
H5_DLL void *H5FL_seq_malloc(H5FL_seq_head_t *head, size_t elem H5FL_TRACK_PARAMS);
diff --git a/src/H5FS.c b/src/H5FS.c
index a50a0e2..3c259f4 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -143,7 +143,8 @@ H5FS_create(H5F_t *f, haddr_t *fs_addr, const H5FS_create_t *fs_create, uint16_t
/* Set the return value */
ret_value = fspace;
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: fspace = %p, fspace->addr = %a\n", __func__, fspace, fspace->addr);
+ HDfprintf(stderr, "%s: fspace = %p, fspace->addr = %" PRIuHADDR "\n", __func__, (void *)fspace,
+ fspace->addr);
#endif /* H5FS_DEBUG */
done:
@@ -152,7 +153,7 @@ done:
HDONE_ERROR(H5E_FSPACE, H5E_CANTFREE, NULL, "unable to destroy free space header")
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Leaving, ret_value = %d\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: Leaving, ret_value = %p\n", __func__, (void *)ret_value);
#endif /* H5FS_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5FS_create() */
@@ -180,8 +181,8 @@ H5FS_open(H5F_t *f, haddr_t fs_addr, uint16_t nclasses, const H5FS_section_class
FUNC_ENTER_NOAPI(NULL)
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %a, nclasses = %Zu\n", __func__, fs_addr,
- nclasses);
+ HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %" PRIuHADDR ", nclasses = %Zu\n", __func__,
+ fs_addr, nclasses);
#endif /* H5FS_DEBUG */
/* Check arguments. */
@@ -201,10 +202,10 @@ H5FS_open(H5F_t *f, haddr_t fs_addr, uint16_t nclasses, const H5FS_section_class
(fspace = (H5FS_t *)H5AC_protect(f, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, NULL, "unable to load free space header")
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: fspace->sect_addr = %a\n", __func__, fspace->sect_addr);
- HDfprintf(stderr, "%s: fspace->sect_size = %Hu\n", __func__, fspace->sect_size);
- HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu\n", __func__, fspace->alloc_sect_size);
- HDfprintf(stderr, "%s: fspace->sinfo = %p\n", __func__, fspace->sinfo);
+ HDfprintf(stderr, "%s: fspace->sect_addr = %" PRIuHADDR "\n", __func__, fspace->sect_addr);
+ HDfprintf(stderr, "%s: fspace->sect_size = %" PRIuHSIZE "\n", __func__, fspace->sect_size);
+ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %" PRIuHSIZE "\n", __func__, fspace->alloc_sect_size);
+ HDfprintf(stderr, "%s: fspace->sinfo = %p\n", __func__, (void *)fspace->sinfo);
HDfprintf(stderr, "%s: fspace->rc = %u\n", __func__, fspace->rc);
#endif /* H5FS_DEBUG */
@@ -248,7 +249,7 @@ H5FS_delete(H5F_t *f, haddr_t fs_addr)
FUNC_ENTER_NOAPI(FAIL)
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %a\n", __func__, fs_addr);
+ HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %" PRIuHADDR "\n", __func__, fs_addr);
#endif /* H5FS_DEBUG */
/* Check arguments. */
@@ -318,7 +319,7 @@ H5FS_delete(H5F_t *f, haddr_t fs_addr)
/* Delete serialized section storage, if there are any */
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: fspace->sect_addr = %a\n", __func__, fspace->sect_addr);
+ HDfprintf(stderr, "%s: fspace->sect_addr = %" PRIuHADDR "\n", __func__, fspace->sect_addr);
#endif /* H5FS_DEBUG */
if (fspace->serial_sect_count > 0) {
unsigned sinfo_status = 0; /* Free space section info's status in the metadata cache */
@@ -404,8 +405,8 @@ H5FS_close(H5F_t *f, H5FS_t *fspace)
HDassert(f);
HDassert(fspace);
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Entering, fspace = %p, fspace->addr = %a, fspace->sinfo = %p\n", __func__, fspace,
- fspace->addr, fspace->sinfo);
+ HDfprintf(stderr, "%s: Entering, fspace = %p, fspace->addr = %" PRIuHADDR ", fspace->sinfo = %p\n",
+ __func__, (void *)fspace, fspace->addr, (void *)fspace->sinfo);
#endif /* H5FS_DEBUG */
/* Check if section info is valid */
@@ -413,11 +414,12 @@ H5FS_close(H5F_t *f, H5FS_t *fspace)
if (fspace->sinfo) {
#ifdef H5FS_DEBUG
HDfprintf(stderr,
- "%s: fspace->tot_sect_count = %Hu, fspace->serial_sect_count = %Hu, fspace->sect_addr = "
- "%a, fspace->rc = %u\n",
+ "%s: fspace->tot_sect_count = %" PRIuHSIZE ", fspace->serial_sect_count = %" PRIuHSIZE
+ ", fspace->sect_addr = %" PRIuHADDR ", fspace->rc = %u\n",
__func__, fspace->tot_sect_count, fspace->serial_sect_count, fspace->sect_addr, fspace->rc);
- HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", __func__,
- fspace->alloc_sect_size, fspace->sect_size);
+ HDfprintf(stderr,
+ "%s: fspace->alloc_sect_size = %" PRIuHSIZE ", fspace->sect_size = %" PRIuHSIZE "\n",
+ __func__, fspace->alloc_sect_size, fspace->sect_size);
#endif /* H5FS_DEBUG */
/* If there are sections to serialize, update them */
/* (if the free space manager is persistent) */
@@ -708,7 +710,7 @@ H5FS__incr(H5FS_t *fspace)
FUNC_ENTER_PACKAGE
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Entering, fpace->addr = %a, fspace->rc = %u\n", __func__, fspace->addr,
+ HDfprintf(stderr, "%s: Entering, fpace->addr = %" PRIuHADDR ", fspace->rc = %u\n", __func__, fspace->addr,
fspace->rc);
#endif /* H5FS_DEBUG */
@@ -748,7 +750,7 @@ H5FS__decr(H5FS_t *fspace)
FUNC_ENTER_PACKAGE
#ifdef H5FS_DEBUG
- HDfprintf(stderr, "%s: Entering, fpace->addr = %a, fspace->rc = %u\n", __func__, fspace->addr,
+ HDfprintf(stderr, "%s: Entering, fpace->addr = %" PRIuHADDR ", fspace->rc = %u\n", __func__, fspace->addr,
fspace->rc);
#endif /* H5FS_DEBUG */
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index 6c5a850..55f8a94 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -123,7 +123,7 @@ H5FS__sinfo_new(H5F_t *f, H5FS_t *fspace)
HDassert(f);
HDassert(fspace);
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: fspace->addr = %a\n", __func__, fspace->addr);
+ HDfprintf(stderr, "%s: fspace->addr = %" PRIuHADDR "\n", __func__, fspace->addr);
#endif /* H5FS_SINFO_DEBUG */
/* Allocate the free space header */
@@ -136,7 +136,7 @@ H5FS__sinfo_new(H5F_t *f, H5FS_t *fspace)
sinfo->sect_off_size = (fspace->max_sect_addr + 7) / 8;
sinfo->sect_len_size = H5VM_limit_enc_size((uint64_t)fspace->max_sect_size);
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: fspace->max_sect_size = %Hu\n", __func__, fspace->max_sect_size);
+ HDfprintf(stderr, "%s: fspace->max_sect_size = %" PRIuHSIZE "\n", __func__, fspace->max_sect_size);
HDfprintf(stderr, "%s: fspace->max_sect_addr = %u\n", __func__, fspace->max_sect_addr);
HDfprintf(stderr, "%s: sinfo->nbins = %u\n", __func__, sinfo->nbins);
HDfprintf(stderr, "%s: sinfo->sect_off_size = %u, sinfo->sect_len_size = %u\n", __func__,
@@ -200,10 +200,12 @@ H5FS__sinfo_lock(H5F_t *f, H5FS_t *fspace, unsigned accmode)
FUNC_ENTER_STATIC
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: Called, fspace->addr = %a, fspace->sinfo = %p, fspace->sect_addr = %a\n", __func__,
- fspace->addr, fspace->sinfo, fspace->sect_addr);
- HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", __func__,
- fspace->alloc_sect_size, fspace->sect_size);
+ HDfprintf(stderr,
+ "%s: Called, fspace->addr = %" PRIuHADDR ", fspace->sinfo = %p, fspace->sect_addr = %" PRIuHADDR
+ "\n",
+ __func__, fspace->addr, (void *)fspace->sinfo, fspace->sect_addr);
+ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %" PRIuHSIZE ", fspace->sect_size = %" PRIuHSIZE "\n",
+ __func__, fspace->alloc_sect_size, fspace->sect_size);
#endif /* H5FS_SINFO_DEBUG */
/* Check arguments. */
@@ -251,8 +253,8 @@ H5FS__sinfo_lock(H5F_t *f, H5FS_t *fspace, unsigned accmode)
HDassert(H5F_addr_defined(fspace->addr));
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: Reading in existing sections, fspace->sect_addr = %a\n", __func__,
- fspace->sect_addr);
+ HDfprintf(stderr, "%s: Reading in existing sections, fspace->sect_addr = %" PRIuHADDR "\n",
+ __func__, fspace->sect_addr);
#endif /* H5FS_SINFO_DEBUG */
/* Protect the free space sections */
cache_udata.f = f;
@@ -289,10 +291,12 @@ H5FS__sinfo_lock(H5F_t *f, H5FS_t *fspace, unsigned accmode)
done:
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: Leaving, fspace->addr = %a, fspace->sinfo = %p, fspace->sect_addr = %a\n",
- __func__, fspace->addr, fspace->sinfo, fspace->sect_addr);
- HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", __func__,
- fspace->alloc_sect_size, fspace->sect_size);
+ HDfprintf(stderr,
+ "%s: Leaving, fspace->addr = %" PRIuHADDR
+ ", fspace->sinfo = %p, fspace->sect_addr = %" PRIuHADDR "\n",
+ __func__, fspace->addr, (void *)fspace->sinfo, fspace->sect_addr);
+ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %" PRIuHSIZE ", fspace->sect_size = %" PRIuHSIZE "\n",
+ __func__, fspace->alloc_sect_size, fspace->sect_size);
#endif /* H5FS_SINFO_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
} /* H5FS__sinfo_lock() */
@@ -331,14 +335,16 @@ H5FS__sinfo_unlock(H5F_t *f, H5FS_t *fspace, hbool_t modified)
FUNC_ENTER_STATIC
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: Called, modified = %t, fspace->addr = %a, fspace->sect_addr = %a\n", __func__,
- modified, fspace->addr, fspace->sect_addr);
+ HDfprintf(stderr,
+ "%s: Called, modified = %d, fspace->addr = %" PRIuHADDR ", fspace->sect_addr = %" PRIuHADDR
+ "\n",
+ __func__, modified, fspace->addr, fspace->sect_addr);
HDfprintf(
stderr,
- "%s: fspace->sinfo_lock_count = %u, fspace->sinfo_modified = %t, fspace->sinfo_protected = %t\n",
+ "%s: fspace->sinfo_lock_count = %u, fspace->sinfo_modified = %d, fspace->sinfo_protected = %d\n",
__func__, fspace->sinfo_lock_count, fspace->sinfo_modified, fspace->sinfo_protected);
- HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n", __func__,
- fspace->alloc_sect_size, fspace->sect_size);
+ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %" PRIuHSIZE ", fspace->sect_size = %" PRIuHSIZE "\n",
+ __func__, fspace->alloc_sect_size, fspace->sect_size);
#endif /* H5FS_SINFO_DEBUG */
/* Check arguments. */
@@ -490,7 +496,8 @@ H5FS__sinfo_unlock(H5F_t *f, H5FS_t *fspace, hbool_t modified)
#ifdef H5FS_SINFO_DEBUG
HDfprintf(stderr,
- "%s: Freeing section info on disk, old_sect_addr = %a, old_alloc_sect_size = %Hu\n",
+ "%s: Freeing section info on disk, old_sect_addr = %" PRIuHADDR
+ ", old_alloc_sect_size = %" PRIuHSIZE "\n",
__func__, old_sect_addr, old_alloc_sect_size);
#endif /* H5FS_SINFO_DEBUG */
/* Release space for section info in file */
@@ -1343,7 +1350,8 @@ H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flag
FUNC_ENTER_NOAPI(FAIL)
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: *sect = {%a, %Hu, %u, %s}\n", __func__, sect->addr, sect->size, sect->type,
+ HDfprintf(stderr, "%s: *sect = {%" PRIuHADDR ", %" PRIuHSIZE ", %u, %s}\n", __func__, sect->addr,
+ sect->size, sect->type,
(sect->state == H5FS_SECT_LIVE ? "H5FS_SECT_LIVE" : "H5FS_SECT_SERIALIZED"));
#endif /* H5FS_SINFO_DEBUG */
@@ -1384,7 +1392,7 @@ H5FS_sect_add(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigned flag
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't insert free space section into skip list")
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: fspace->tot_space = %Hu\n", __func__, fspace->tot_space);
+ HDfprintf(stderr, "%s: fspace->tot_space = %" PRIuHSIZE "\n", __func__, fspace->tot_space);
#endif /* H5FS_SINFO_DEBUG */
/* Mark free space sections as changed */
/* (if adding sections while deserializing sections, don't set the flag) */
@@ -1429,8 +1437,8 @@ H5FS_sect_try_extend(H5F_t *f, H5FS_t *fspace, haddr_t addr, hsize_t size, hsize
FUNC_ENTER_NOAPI(FAIL)
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: addr = %a, size = %Hu, extra_requested = %hu\n", __func__, addr, size,
- extra_requested);
+ HDfprintf(stderr, "%s: addr = %" PRIuHADDR ", size = %" PRIuHSIZE ", extra_requested = %" PRIuHSIZE "\n",
+ __func__, addr, size, extra_requested);
#endif /* H5FS_SINFO_DEBUG */
/* Check arguments. */
@@ -1442,9 +1450,10 @@ H5FS_sect_try_extend(H5F_t *f, H5FS_t *fspace, haddr_t addr, hsize_t size, hsize
/* Check for any sections on free space list */
#ifdef H5FS_SINFO_DEBUG
- HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu\n", __func__, fspace->tot_sect_count);
- HDfprintf(stderr, "%s: fspace->serial_sect_count = %Hu\n", __func__, fspace->serial_sect_count);
- HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", __func__, fspace->ghost_sect_count);
+ HDfprintf(stderr, "%s: fspace->tot_sect_count = %" PRIuHSIZE "\n", __func__, fspace->tot_sect_count);
+ HDfprintf(stderr, "%s: fspace->serial_sect_count = %" PRIuHSIZE "\n", __func__,
+ fspace->serial_sect_count);
+ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %" PRIuHSIZE "\n", __func__, fspace->ghost_sect_count);
#endif /* H5FS_SINFO_DEBUG */
if (fspace->tot_sect_count > 0) {
H5FS_section_info_t *sect; /* Temporary free space section */
@@ -1592,7 +1601,7 @@ H5FS_sect_try_merge(H5F_t *f, H5FS_t *fspace, H5FS_section_info_t *sect, unsigne
} /* end if */
else {
/* Check if section is merged */
- if (sect->size > saved_fs_size) {
+ if (sect->size != saved_fs_size) {
if (H5FS__sect_link(fspace, sect, flags) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL,
"can't insert free space section into skip list")
diff --git a/src/H5Fio.c b/src/H5Fio.c
index 5a9d2c1..53fec97 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -233,12 +233,101 @@ H5F_block_write(H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size, const void
/* Pass through page buffer layer */
if (H5PB_write(f->shared, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "write through page buffer failed")
-
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_block_write() */
/*-------------------------------------------------------------------------
+ * Function: H5F_shared_select_read
+ *
+ * Purpose: Reads some data from a file/server/etc into a buffer.
+ * The location of the data is defined by the mem_spaces and
+ * file_spaces dataspace arrays, along with the offsets
+ *              array. The addresses are relative to the base address for
+ * the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * May 3 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_shared_select_read(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces,
+ H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */)
+{
+ H5FD_mem_t map_type; /* Mapped memory type */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f_sh);
+ HDassert((mem_spaces) || (count == 0));
+ HDassert((file_spaces) || (count == 0));
+ HDassert((offsets) || (count == 0));
+ HDassert((element_sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* Treat global heap as raw data */
+ map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
+
+ /* Pass down to file driver layer (bypass page buffer for now) */
+ if (H5FD_read_selection(f_sh->lf, map_type, count, mem_spaces, file_spaces, offsets, element_sizes,
+ bufs) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "selection read through file driver failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_shared_select_read() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_shared_select_write
+ *
+ * Purpose: Writes some data from a buffer to a file/server/etc.
+ * The location of the data is defined by the mem_spaces and
+ * file_spaces dataspace arrays, along with the offsets
+ *              array. The addresses are relative to the base address for
+ * the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ * May 4 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_shared_select_write(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces,
+ H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], const void *bufs[])
+{
+ H5FD_mem_t map_type; /* Mapped memory type */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f_sh);
+ HDassert((mem_spaces) || (count == 0));
+ HDassert((file_spaces) || (count == 0));
+ HDassert((offsets) || (count == 0));
+ HDassert((element_sizes) || (count == 0));
+ HDassert((bufs) || (count == 0));
+
+ /* Treat global heap as raw data */
+ map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
+
+ /* Pass down to file driver layer (bypass page buffer for now) */
+ if (H5FD_write_selection(f_sh->lf, map_type, count, mem_spaces, file_spaces, offsets, element_sizes,
+ bufs) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "selection write through file driver failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_shared_select_write() */
+
+/*-------------------------------------------------------------------------
* Function: H5F_flush_tagged_metadata
*
* Purpose: Flushes metadata with specified tag in the metadata cache
diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c
index 53d2d78..02d8d52 100644
--- a/src/H5Fmpi.c
+++ b/src/H5Fmpi.c
@@ -31,11 +31,12 @@
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fpkg.h" /* File access */
-#include "H5FDprivate.h" /* File drivers */
-#include "H5Iprivate.h" /* IDs */
+#include "H5private.h" /* Generic Functions */
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* File access */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5Iprivate.h" /* IDs */
#include "H5VLnative_private.h" /* Native VOL connector */
@@ -402,4 +403,189 @@ H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_mpi_retrieve_comm */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_coll_metadata_reads
+ *
+ * Purpose: Determines whether collective metadata reads should be
+ * performed. This routine is meant to be the single source of
+ * truth for the collective metadata reads status, as it
+ * coordinates between the file-global flag and the flag set
+ * for the current operation in the current API context.
+ *
+ * Return: TRUE/FALSE (can't fail)
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5F_get_coll_metadata_reads(const H5F_t *file)
+{
+ H5P_coll_md_read_flag_t file_flag = H5P_USER_FALSE;
+ hbool_t ret_value = FALSE;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ HDassert(file && file->shared);
+
+ /* Retrieve the file-global flag */
+ file_flag = H5F_COLL_MD_READ(file);
+
+ /* If file flag is set to H5P_FORCE_FALSE, exit early
+ * with FALSE, since collective metadata reads have
+ * been explicitly disabled somewhere in the library.
+ */
+ if (H5P_FORCE_FALSE == file_flag)
+ ret_value = FALSE;
+ else {
+ /* If file flag is set to H5P_USER_TRUE, ignore
+ * any settings in the API context. A file-global
+ * setting of H5P_USER_TRUE for collective metadata
+ * reads should ignore any settings on an Access
+ * Property List for an individual operation.
+ */
+ if (H5P_USER_TRUE == file_flag)
+ ret_value = TRUE;
+ else {
+ /* Get the collective metadata reads flag from
+ * the current API context.
+ */
+ ret_value = H5CX_get_coll_metadata_read();
+ }
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_get_coll_metadata_reads() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_set_coll_metadata_reads
+ *
+ * Purpose: Used to temporarily modify the collective metadata reads
+ * status. This is useful for cases where either:
+ *
+ * * Collective metadata reads are enabled, but need to be
+ * disabled for an operation about to occur that may trigger
+ * an independent metadata read (such as only rank 0 doing
+ * something)
+ *
+ * * Metadata reads are currently independent, but it is
+ * guaranteed that the application has maintained
+ * collectivity at the interface level (e.g., an operation
+ * that modifies metadata is being performed). In this case,
+ * it should be safe to enable collective metadata reads,
+ * barring any internal library issues that may occur
+ *
+ * After completion, the `file_flag` parameter will be set to
+ * the previous value of the file-global collective metadata
+ * reads flag. The `context_flag` parameter will be set to the
+ * previous value of the API context's collective metadata
+ * reads flag. Another call to this routine should be made to
+ * restore these values (see below warning).
+ *
+ * !! WARNING !!
+ * It is dangerous to modify the collective metadata reads
+ * status, as this can cause crashes, hangs and corruption in
+ * the HDF5 file when improperly done. Therefore, the
+ * `file_flag` and `context_flag` parameters are both
+ * mandatory, and it is assumed that the caller will guarantee
+ * these settings are restored with another call to this
+ * routine once the bracketed operation is complete.
+ * !! WARNING !!
+ *
+ * Return: Nothing
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5F_set_coll_metadata_reads(H5F_t *file, H5P_coll_md_read_flag_t *file_flag, hbool_t *context_flag)
+{
+ H5P_coll_md_read_flag_t prev_file_flag = H5P_USER_FALSE;
+ hbool_t prev_context_flag = FALSE;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ HDassert(file && file->shared);
+ HDassert(file_flag);
+ HDassert(context_flag);
+
+ /* Save old state */
+ prev_file_flag = H5F_COLL_MD_READ(file);
+ prev_context_flag = H5CX_get_coll_metadata_read();
+
+ /* Set new desired state */
+ if (prev_file_flag != *file_flag) {
+ file->shared->coll_md_read = *file_flag;
+ *file_flag = prev_file_flag;
+ }
+ if (prev_context_flag != *context_flag) {
+ H5CX_set_coll_metadata_read(*context_flag);
+ *context_flag = prev_context_flag;
+ }
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* end H5F_set_coll_metadata_reads() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_mpi_get_file_block_type
+ *
+ * Purpose: Creates an MPI derived datatype for communicating an
+ * H5F_block_t structure. If `commit` is specified as TRUE,
+ * the resulting datatype will be committed and ready for
+ * use in communication. Otherwise, the type is only suitable
+ * for building other derived types.
+ *
+ * If TRUE is returned through `new_type_derived`, this lets
+ * the caller know that the datatype has been derived and
+ * should be freed with MPI_Type_free once it is no longer
+ * needed.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_mpi_get_file_block_type(hbool_t commit, MPI_Datatype *new_type, hbool_t *new_type_derived)
+{
+ MPI_Datatype types[2];
+ MPI_Aint displacements[2];
+ int block_lengths[2];
+ int field_count;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(new_type);
+ HDassert(new_type_derived);
+
+ *new_type_derived = FALSE;
+
+ field_count = 2;
+ HDassert(field_count == sizeof(types) / sizeof(MPI_Datatype));
+
+ block_lengths[0] = 1;
+ block_lengths[1] = 1;
+ displacements[0] = offsetof(H5F_block_t, offset);
+ displacements[1] = offsetof(H5F_block_t, length);
+ types[0] = HADDR_AS_MPI_TYPE;
+ types[1] = HSIZE_AS_MPI_TYPE;
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Type_create_struct(field_count, block_lengths, displacements, types, new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+ *new_type_derived = TRUE;
+
+ if (commit && MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+done:
+ if (ret_value < 0) {
+ if (*new_type_derived) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_free(new_type)))
+ HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+ *new_type_derived = FALSE;
+ }
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_mpi_get_file_block_type() */
+
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index a5ccbab..629aee1 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -758,6 +758,7 @@ struct H5O_loc_t;
struct H5HG_heap_t;
struct H5VL_class_t;
struct H5P_genplist_t;
+struct H5S_t;
/* Forward declarations for anonymous H5F objects */
@@ -923,6 +924,14 @@ H5_DLL herr_t H5F_shared_block_write(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_
const void *buf);
H5_DLL herr_t H5F_block_write(H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf);
+/* Functions that operate on selections of elements in the file */
+H5_DLL herr_t H5F_shared_select_read(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count,
+ struct H5S_t **mem_spaces, struct H5S_t **file_spaces, haddr_t offsets[],
+ size_t element_sizes[], void *bufs[] /* out */);
+H5_DLL herr_t H5F_shared_select_write(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count,
+ struct H5S_t **mem_spaces, struct H5S_t **file_spaces,
+ haddr_t offsets[], size_t element_sizes[], const void *bufs[]);
+
/* Functions that flush or evict */
H5_DLL herr_t H5F_flush_tagged_metadata(H5F_t *f, haddr_t tag);
H5_DLL herr_t H5F_evict_tagged_metadata(H5F_t *f, haddr_t tag);
@@ -962,6 +971,9 @@ H5_DLL MPI_Comm H5F_mpi_get_comm(const H5F_t *f);
H5_DLL int H5F_shared_mpi_get_size(const H5F_shared_t *f_sh);
H5_DLL int H5F_mpi_get_size(const H5F_t *f);
H5_DLL herr_t H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm);
+H5_DLL herr_t H5F_mpi_get_file_block_type(hbool_t commit, MPI_Datatype *new_type, hbool_t *new_type_derived);
+H5_DLL hbool_t H5F_get_coll_metadata_reads(const H5F_t *f);
+H5_DLL void H5F_set_coll_metadata_reads(H5F_t *f, H5P_coll_md_read_flag_t *file_flag, hbool_t *context_flag);
#endif /* H5_HAVE_PARALLEL */
/* External file cache routines */
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index c3230e1..671eec3 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -1606,7 +1606,7 @@ H5_DLL herr_t H5Fget_page_buffering_stats(hid_t file_id, unsigned accesses[2], u
* \brief Obtains information about a cache image if it exists
*
* \file_id
- * \param[out] image_addr Offset of the cache image if it exists, or \c HADDR_UNDEF if it does not
+ * \param[out] image_addr Offset of the cache image if it exists, or #HADDR_UNDEF if it does not
* \param[out] image_size Length of the cache image if it exists, or 0 if it does not
* \returns \herr_t
*
@@ -1878,6 +1878,7 @@ H5_DLL herr_t H5Fget_info1(hid_t obj_id, H5F_info1_t *file_info);
*
* \deprecated When?
*
+ * \todo In which version was this function introduced?
* \todo In which version was this function deprecated?
*
*/
@@ -1896,6 +1897,7 @@ H5_DLL herr_t H5Fset_latest_format(hid_t file_id, hbool_t latest_format);
* \details H5Fis_hdf5() determines whether a file is in the HDF5 format.
*
* \todo In which version was this function deprecated?
+ * \todo In which version was this function introduced?
*
*/
H5_DLL htri_t H5Fis_hdf5(const char *file_name);
diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h
index 4cf4623..d1725f6 100644
--- a/src/H5Gprivate.h
+++ b/src/H5Gprivate.h
@@ -130,7 +130,7 @@ typedef enum {
typedef int H5G_own_loc_t;
/* Structure to store information about the name an object was opened with */
-typedef struct {
+typedef struct H5G_name_t {
H5RS_str_t *full_path_r; /* Path to object, as seen from root of current file mounting hierarchy */
H5RS_str_t *user_path_r; /* Path to object, as opened by user */
unsigned obj_hidden; /* Whether the object is visible in group hier. */
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index f409479..22ad09b 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -895,7 +895,7 @@ H5HF__cache_iblock_get_initial_load_size(void *_udata, size_t *image_len)
* Function: H5HF__cache_iblock_verify_chksum
*
* Purpose: Verify the computed checksum of the data structure is the
- * same as the stored chksum.
+ * same as the stored checksum.
*
* Return: Success: TRUE/FALSE
* Failure: Negative
diff --git a/src/H5HP.c b/src/H5HP.c
deleted file mode 100644
index d164223..0000000
--- a/src/H5HP.c
+++ /dev/null
@@ -1,904 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Purpose: Provides a heap abstract data type.
- *
- * (See chapter 11 - "Priority Queues" of _Algorithms_, by
- * Sedgewick for additional information)
- *
- */
-
-/* Private headers needed */
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5HPprivate.h" /* Heap routines */
-#include "H5FLprivate.h" /* Memory management functions */
-
-/* Local Macros */
-#define H5HP_START_SIZE 16 /* Initial number of entries for heaps */
-
-/* Private typedefs & structs */
-
-/* Data structure for entries in the internal heap array */
-typedef struct {
- int val; /* Value to be used for heap condition */
- H5HP_info_t *obj; /* Pointer to object stored in heap */
-} H5HP_ent_t;
-
-/* Main heap data structure */
-struct H5HP_t {
- H5HP_type_t type; /* Type of heap (minimum or maximum value at "top") */
- size_t nobjs; /* Number of active objects in heap array */
- size_t nalloc; /* Number of allocated locations in heap array */
- H5HP_ent_t *heap; /* Pointer to array containing heap entries */
-};
-
-/* Static functions */
-static herr_t H5HP__swim_max(H5HP_t *heap, size_t loc);
-static herr_t H5HP__swim_min(H5HP_t *heap, size_t loc);
-static herr_t H5HP__sink_max(H5HP_t *heap, size_t loc);
-static herr_t H5HP__sink_min(H5HP_t *heap, size_t loc);
-
-/* Declare a free list to manage the H5HP_t struct */
-H5FL_DEFINE_STATIC(H5HP_t);
-
-/* Declare a free list to manage sequences of H5HP_ent_t */
-H5FL_SEQ_DEFINE_STATIC(H5HP_ent_t);
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP__swim_max
- PURPOSE
- Restore heap condition by moving an object upward
- USAGE
- herr_t H5HP__swim_max(heap, loc)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- size_t loc; IN: Location to start from
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Restore the heap condition for the heap's array by "swimming" the object
- at a location upward.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- This routine is for "maximum" value heaps.
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-static herr_t
-H5HP__swim_max(H5HP_t *heap, size_t loc)
-{
- int val; /* Temporary copy value of object to move in heap */
- H5HP_info_t *obj; /* Temporary pointer to object to move in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Get copies of the information about the object to move in the heap */
- val = heap->heap[loc].val;
- obj = heap->heap[loc].obj;
-
- /* Move object up in heap until it's reached the maximum location possible */
- while (heap->heap[loc / 2].val < val) {
- /* Move object "above" current location in heap down */
- heap->heap[loc].val = heap->heap[loc / 2].val;
- heap->heap[loc].obj = heap->heap[loc / 2].obj;
-
- /* Update heap location for object which moved */
- heap->heap[loc].obj->heap_loc = loc;
-
- /* Move to location "above" current location */
- loc = loc / 2;
- } /* end while */
-
- /* Put object into heap at correct location */
- heap->heap[loc].val = val;
- heap->heap[loc].obj = obj;
-
- /* Update heap location for object */
- heap->heap[loc].obj->heap_loc = loc;
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP__swim_max() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP__swim_min
- PURPOSE
- Restore heap condition by moving an object upward
- USAGE
- herr_t H5HP__swim_min(heap, loc)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- size_t loc; IN: Location to start from
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Restore the heap condition for the heap's array by "swimming" the object
- at a location upward.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- This routine is for "minimum" value heaps.
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-static herr_t
-H5HP__swim_min(H5HP_t *heap, size_t loc)
-{
- int val; /* Temporary copy value of object to move in heap */
- H5HP_info_t *obj; /* Temporary pointer to object to move in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Get copies of the information about the object to move in the heap */
- val = heap->heap[loc].val;
- obj = heap->heap[loc].obj;
-
- /* Move object up in heap until it's reached the minimum location possible */
- while (heap->heap[loc / 2].val > val) {
- /* Move object "above" current location in heap down */
- heap->heap[loc].val = heap->heap[loc / 2].val;
- heap->heap[loc].obj = heap->heap[loc / 2].obj;
-
- /* Update heap location for object which moved */
- heap->heap[loc].obj->heap_loc = loc;
-
- /* Move to location "above" current location */
- loc = loc / 2;
- } /* end while */
-
- /* Put object into heap at correct location */
- heap->heap[loc].val = val;
- heap->heap[loc].obj = obj;
-
- /* Update heap location for object */
- heap->heap[loc].obj->heap_loc = loc;
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP__swim_min() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP__sink_max
- PURPOSE
- Restore heap condition by moving an object downward
- USAGE
- herr_t H5HP__sink_max(heap, loc)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- size_t loc; IN: Location to start from
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Restore the heap condition for the heap's array by "sinking" the object
- at a location downward.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- This routine is for "maximum" value heaps.
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-static herr_t
-H5HP__sink_max(H5HP_t *heap, size_t loc)
-{
- int val; /* Temporary copy value of object to move in heap */
- void * obj; /* Temporary pointer to object to move in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Get copies of the information about the object to move in the heap */
- val = heap->heap[loc].val;
- obj = heap->heap[loc].obj;
-
- /* Move object up in heap until it's reached the maximum location possible */
- while ((2 * loc) <= heap->nobjs) {
- size_t new_loc = loc * 2; /* New object's potential location area */
-
- /* Get the greater of the two objects below the location in heap */
- if (new_loc < heap->nobjs && (heap->heap[new_loc].val < heap->heap[new_loc + 1].val))
- new_loc++;
-
- /* Check if the object is smaller than the larger of the objects below it */
- /* If so, its in the correct location now, and we can get out */
- if (val >= heap->heap[new_loc].val)
- break;
-
- /* Move the greater of the two objects below the current location up */
- heap->heap[loc].val = heap->heap[new_loc].val;
- heap->heap[loc].obj = heap->heap[new_loc].obj;
-
- /* Update heap location for object which moved */
- heap->heap[loc].obj->heap_loc = loc;
-
- /* Move to location "below" current location */
- loc = new_loc;
- } /* end while */
-
- /* Put object into heap at correct location */
- heap->heap[loc].val = val;
- heap->heap[loc].obj = (H5HP_info_t *)obj;
-
- /* Update heap location for object */
- heap->heap[loc].obj->heap_loc = loc;
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP__sink_max() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP__sink_min
- PURPOSE
- Restore heap condition by moving an object downward
- USAGE
- herr_t H5HP__sink_min(heap, loc)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- size_t loc; IN: Location to start from
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Restore the heap condition for the heap's array by "sinking" the object
- at a location downward.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- This routine is for "minimum" value heaps.
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-static herr_t
-H5HP__sink_min(H5HP_t *heap, size_t loc)
-{
- int val; /* Temporary copy value of object to move in heap */
- void * obj; /* Temporary pointer to object to move in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_STATIC_NOERR
-
- /* Get copies of the information about the object to move in the heap */
- val = heap->heap[loc].val;
- obj = heap->heap[loc].obj;
-
- /* Move object up in heap until it's reached the maximum location possible */
- while ((2 * loc) <= heap->nobjs) {
- size_t new_loc = loc * 2; /* New object's potential location area */
-
- /* Get the lesser of the two objects below the location in heap */
- if (new_loc < heap->nobjs && (heap->heap[new_loc].val > heap->heap[new_loc + 1].val))
- new_loc++;
-
- /* Check if the object is greater than the larger of the objects below it */
- /* If so, its in the correct location now, and we can get out */
- if (val <= heap->heap[new_loc].val)
- break;
-
- /* Move the greater of the two objects below the current location up */
- heap->heap[loc].val = heap->heap[new_loc].val;
- heap->heap[loc].obj = heap->heap[new_loc].obj;
-
- /* Update heap location for object which moved */
- heap->heap[loc].obj->heap_loc = loc;
-
- /* Move to location "below" current location */
- loc = new_loc;
- } /* end while */
-
- /* Put object into heap at correct location */
- heap->heap[loc].val = val;
- heap->heap[loc].obj = (H5HP_info_t *)obj;
-
- /* Update heap location for object */
- heap->heap[loc].obj->heap_loc = loc;
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP__sink_min() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_create
- PURPOSE
- Create a heap
- USAGE
- H5HP_t *H5HP_create(heap_type)
- H5HP_type_t heap_type; IN: Type of heap to create
-
- RETURNS
- Returns a pointer to a heap on success, NULL on failure.
- DESCRIPTION
- Create a priority queue. The SIZE is used to set the initial number of
- entries allocated.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-H5HP_t *
-H5HP_create(H5HP_type_t heap_type)
-{
- H5HP_t *new_heap = NULL; /* Pointer to new heap object created */
- H5HP_t *ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI(NULL)
-
- /* Check args */
- HDassert(heap_type == H5HP_MIN_HEAP || heap_type == H5HP_MAX_HEAP);
-
- /* Allocate ref-counted string structure */
- if ((new_heap = H5FL_MALLOC(H5HP_t)) == NULL)
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "memory allocation failed");
-
- /* Allocate the array to store the heap entries */
- if ((new_heap->heap = H5FL_SEQ_MALLOC(H5HP_ent_t, (size_t)(H5HP_START_SIZE + 1))) == NULL)
- HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, NULL, "memory allocation failed");
-
- /* Set the internal fields */
- new_heap->type = heap_type;
- new_heap->nobjs = 0;
- new_heap->nalloc = H5HP_START_SIZE + 1;
-
- /* Set the information in the 0'th location based on the type of heap */
- if (heap_type == H5HP_MIN_HEAP) {
- /* Set the value in the '0' location to be the minimum value, to
- * simplify the algorithms
- */
- new_heap->heap[0].val = INT_MIN;
- new_heap->heap[0].obj = NULL;
- } /* end if */
- else {
- /* Set the value in the '0' location to be the maximum value, to
- * simplify the algorithms
- */
- new_heap->heap[0].val = INT_MAX;
- new_heap->heap[0].obj = NULL;
- } /* end else */
-
- /* Set the return value */
- ret_value = new_heap;
-
-done:
- /* Error cleanup */
- if (NULL == ret_value) {
- if (NULL != new_heap) {
- if (NULL != new_heap->heap)
- new_heap->heap = H5FL_SEQ_FREE(H5HP_ent_t, new_heap->heap);
- new_heap = H5FL_FREE(H5HP_t, new_heap);
- } /* end if */
- } /* end if */
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_create() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_count
- PURPOSE
- Check the number of elements in a heap
- USAGE
- ssize_t H5HP_count(heap)
- const H5HP_t *heap; IN: Pointer to heap to query
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Checks the number of elements in heap
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-ssize_t
-H5HP_count(const H5HP_t *heap)
-{
- ssize_t ret_value; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check args */
- HDassert(heap);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Return the number of objects in the heap */
- H5_CHECK_OVERFLOW(heap->nobjs, size_t, ssize_t);
- ret_value = (ssize_t)heap->nobjs;
-
- /* No post-condition check necessary, since heap is constant */
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_count() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_insert
- PURPOSE
- Insert an object into a heap, with an initial value
- USAGE
- herr_t H5HP_insert(heap, val, obj)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- int val; IN: Initial value for object in heap
- void *obj; IN: Pointer to object to insert into heap
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Inserts a OBJ into a HEAP, with an initial VALue.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_insert(H5HP_t *heap, int val, void *obj)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- HDassert(heap);
- HDassert(obj);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Increment number of objects in heap */
- heap->nobjs++;
-
- /* Check if we need to allocate more room for heap array */
- if (heap->nobjs >= heap->nalloc) {
- size_t n = MAX(H5HP_START_SIZE, 2 * (heap->nalloc - 1)) + 1;
- H5HP_ent_t *new_heap = H5FL_SEQ_REALLOC(H5HP_ent_t, heap->heap, n);
-
- if (!new_heap)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to extend heap array");
- heap->heap = new_heap;
- heap->nalloc = n;
- } /* end if */
-
- /* Insert new object at end of heap */
- heap->heap[heap->nobjs].val = val;
- heap->heap[heap->nobjs].obj = (H5HP_info_t *)obj;
- heap->heap[heap->nobjs].obj->heap_loc = heap->nobjs;
-
- /* Restore heap condition */
- if (heap->type == H5HP_MAX_HEAP) {
- if (H5HP__swim_max(heap, heap->nobjs) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "unable to restore heap condition");
- } /* end if */
- else {
- if (H5HP__swim_min(heap, heap->nobjs) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "unable to restore heap condition");
- } /* end else */
-
-done:
-
- /* Check internal consistency */
- /* (Post-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_insert() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_top
- PURPOSE
- Check the value of the top object in the heap
- USAGE
- herr_t H5HP_top(heap, val)
- const H5HP_t *heap; IN: Pointer to heap to modify
- int val; IN/OUT: Initial value for object in heap
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Checks the value of the top object in a heap
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_top(const H5HP_t *heap, int *val)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check args */
- HDassert(heap);
- HDassert(val);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Get value of the top object in the heap */
- *val = heap->heap[1].val;
-
- /* No post-condition check necessary, since heap is constant */
- FUNC_LEAVE_NOAPI(SUCCEED);
-} /* end H5HP_top() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_remove
- PURPOSE
- Remove an object into a heap
- USAGE
- herr_t H5HP_remove(heap, val, obj)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- int *val; OUT: Pointer to value of object removed from heap
- void **obj; OUT: Pointer to object removed from heap
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Removes the top object on a heap, returning its value and object pointer
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_remove(H5HP_t *heap, int *val, void **obj)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- HDassert(heap);
- HDassert(val);
- HDassert(obj);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Check if there are any objects on the heap to remove */
- if (heap->nobjs == 0)
- HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "heap is empty");
-
- /* Get the information for the top object on the heap */
- HDassert(heap->heap[1].obj->heap_loc == 1);
- *val = heap->heap[1].val;
- *obj = heap->heap[1].obj;
-
- /* Move the last element in the heap to the top */
- heap->heap[1].val = heap->heap[heap->nobjs].val;
- heap->heap[1].obj = heap->heap[heap->nobjs].obj;
- heap->heap[1].obj->heap_loc = 1;
-
- /* Decrement number of objects in heap */
- heap->nobjs--;
-
- /* Restore heap condition, if there are objects on the heap */
- if (heap->nobjs > 0) {
- if (heap->type == H5HP_MAX_HEAP) {
- if (H5HP__sink_max(heap, (size_t)1) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "unable to restore heap condition");
- } /* end if */
- else {
- if (H5HP__sink_min(heap, (size_t)1) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTDELETE, FAIL, "unable to restore heap condition");
- } /* end else */
- } /* end if */
-
-done:
-
- /* Check internal consistency */
- /* (Post-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_remove() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_change
- PURPOSE
- Change the priority of an object on a heap
- USAGE
- herr_t H5HP_change(heap, val, obj)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- int val; IN: New priority value for object
- void *obj; IN: Pointer to object to modify
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Changes the priority of an object on a heap.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_change(H5HP_t *heap, int val, void *_obj)
-{
- H5HP_info_t *obj = (H5HP_info_t *)_obj; /* Alias for object */
- size_t obj_loc; /* Location of object in heap */
- int old_val; /* Object's old priority value */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- HDassert(heap);
- HDassert(obj);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Get the location of the object in the heap */
- obj_loc = obj->heap_loc;
- HDassert(obj_loc > 0 && obj_loc <= heap->nobjs);
-
- /* Change the heap object's priority */
- old_val = heap->heap[obj_loc].val;
- heap->heap[obj_loc].val = val;
-
- /* Restore heap condition */
- if (val < old_val) {
- if (heap->type == H5HP_MAX_HEAP) {
- if (H5HP__sink_max(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end if */
- else {
- if (H5HP__swim_min(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end else */
- } /* end if */
- else {
- if (heap->type == H5HP_MAX_HEAP) {
- if (H5HP__swim_max(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end if */
- else {
- if (H5HP__sink_min(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end else */
- } /* end else */
-
-done:
-
- /* Check internal consistency */
- /* (Post-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_change() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_incr
- PURPOSE
- Increment the priority of an object on a heap
- USAGE
- herr_t H5HP_incr(heap, amt, obj)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- unsigned amt; IN: Amount to increase priority by
- void *obj; IN: Pointer to object to modify
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Increments the priority of an object on a heap by one.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_incr(H5HP_t *heap, unsigned amt, void *_obj)
-{
- H5HP_info_t *obj = (H5HP_info_t *)_obj; /* Alias for object */
- size_t obj_loc; /* Location of object in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- HDassert(heap);
- HDassert(obj);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Get the location of the object in the heap */
- obj_loc = obj->heap_loc;
- HDassert(obj_loc > 0 && obj_loc <= heap->nobjs);
-
- /* Change the heap object's priority */
- heap->heap[obj_loc].val += (int)amt;
-
- /* Restore heap condition */
- if (H5HP_MAX_HEAP == heap->type) {
- if (H5HP__swim_max(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition")
- } /* end if */
- else {
- if (H5HP__sink_min(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition")
- } /* end else */
-
-done:
-
- /* Check internal consistency */
- /* (Post-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_incr() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_decr
- PURPOSE
- Decrement the priority of an object on a heap
- USAGE
- herr_t H5HP_dec(heap, amt, obj)
- H5HP_t *heap; IN/OUT: Pointer to heap to modify
- unsigned amt; IN: Amount to decrease priority by
- void *obj; IN: Pointer to object to modify
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Decrements the priority of an object on a heap by one.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_decr(H5HP_t *heap, unsigned amt, void *_obj)
-{
- H5HP_info_t *obj = (H5HP_info_t *)_obj; /* Alias for object */
- size_t obj_loc; /* Location of object in heap */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Check args */
- HDassert(heap);
- HDassert(obj);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- /* Get the location of the object in the heap */
- obj_loc = obj->heap_loc;
- HDassert(obj_loc > 0 && obj_loc <= heap->nobjs);
-
- /* Change the heap object's priority */
- H5_CHECK_OVERFLOW(amt, unsigned, int);
- heap->heap[obj_loc].val -= (int)amt;
-
- /* Restore heap condition */
- if (heap->type == H5HP_MAX_HEAP) {
- if (H5HP__sink_max(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end if */
- else {
- if (H5HP__swim_min(heap, obj_loc) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTRESTORE, FAIL, "unable to restore heap condition");
- } /* end else */
-
-done:
-
- /* Check internal consistency */
- /* (Post-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(heap->heap[0].obj == NULL);
-
- FUNC_LEAVE_NOAPI(ret_value);
-} /* end H5HP_decr() */
-
-/*--------------------------------------------------------------------------
- NAME
- H5HP_close
- PURPOSE
- Close a heap, deallocating it.
- USAGE
- herr_t H5HP_close(heap)
- H5HP_t *heap; IN/OUT: Pointer to heap to close
-
- RETURNS
- Returns non-negative on success, negative on failure.
- DESCRIPTION
- Close a heap, freeing all internal information. Any objects left in
- the heap are not deallocated.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
-herr_t
-H5HP_close(H5HP_t *heap)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check args */
- HDassert(heap);
-
- /* Check internal consistency */
- /* (Pre-condition) */
- HDassert(heap->nobjs < heap->nalloc);
- HDassert(heap->heap);
- HDassert((heap->type == H5HP_MAX_HEAP && heap->heap[0].val == INT_MAX) ||
- (heap->type == H5HP_MIN_HEAP && heap->heap[0].val == INT_MIN));
- HDassert(NULL == heap->heap[0].obj);
-
- /* Free internal structures for heap */
- heap->heap = H5FL_SEQ_FREE(H5HP_ent_t, heap->heap);
-
- /* Free actual heap object */
- heap = H5FL_FREE(H5HP_t, heap);
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* end H5HP_close() */
diff --git a/src/H5HPprivate.h b/src/H5HPprivate.h
deleted file mode 100644
index 50020bc..0000000
--- a/src/H5HPprivate.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * This file contains private information about the H5HP module
- */
-#ifndef H5HPprivate_H
-#define H5HPprivate_H
-
-/**************************************/
-/* Public headers needed by this file */
-/**************************************/
-#ifdef LATER
-#include "H5HPpublic.h"
-#endif /* LATER */
-
-/***************************************/
-/* Private headers needed by this file */
-/***************************************/
-#include "H5private.h"
-
-/************/
-/* Typedefs */
-/************/
-
-/* Typedef for heap struct (defined in H5HP.c) */
-typedef struct H5HP_t H5HP_t;
-
-/* Typedef for objects which can be inserted into heaps */
-/* This _must_ be the first field in objects which can be inserted into heaps */
-typedef struct H5HP_info_t {
- size_t heap_loc; /* Location of object in heap */
-} H5HP_info_t;
-
-/* Typedef for type of heap to create */
-typedef enum {
- H5HP_MIN_HEAP, /* Minimum values in heap are at the "top" */
- H5HP_MAX_HEAP /* Maximum values in heap are at the "top" */
-} H5HP_type_t;
-
-/**********/
-/* Macros */
-/**********/
-
-/********************/
-/* Private routines */
-/********************/
-H5_DLL H5HP_t *H5HP_create(H5HP_type_t heap_type);
-H5_DLL herr_t H5HP_insert(H5HP_t *heap, int val, void *obj);
-H5_DLL ssize_t H5HP_count(const H5HP_t *heap);
-H5_DLL herr_t H5HP_top(const H5HP_t *heap, int *val);
-H5_DLL herr_t H5HP_remove(H5HP_t *heap, int *val, void **ptr);
-H5_DLL herr_t H5HP_change(H5HP_t *heap, int val, void *obj);
-H5_DLL herr_t H5HP_incr(H5HP_t *heap, unsigned amt, void *obj);
-H5_DLL herr_t H5HP_decr(H5HP_t *heap, unsigned amt, void *obj);
-H5_DLL herr_t H5HP_close(H5HP_t *heap);
-
-#endif /* H5HPprivate_H */
diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h
index e5a826a..ca5f6e6 100644
--- a/src/H5Lpublic.h
+++ b/src/H5Lpublic.h
@@ -580,7 +580,7 @@ H5_DLL herr_t H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t
* name includes either a relative path or an absolute path to the
* target link, intermediate steps along the path must be verified
* before the existence of the target link can be safely checked. If
- * the path is not verified and an intermediate element of the path
+ * the path is not verified, and an intermediate element of the path
* does not exist, H5Lexists() will fail. The example in the next
* paragraph illustrates one step-by-step method for verifying the
* existence of a link with a relative or absolute path.
@@ -620,13 +620,13 @@ H5_DLL herr_t H5Lget_val_by_idx(hid_t loc_id, const char *group_name, H5_index_t
* H5Lexists() with arguments \c file, \c "/", and \c lapl
* returns a positive value; in other words,
* \Code{H5Lexists(file, "/", lapl)} returns a positive value.
- * In HDF5 version 1.8.16, this function returns 0.</li>
+ * In the HDF5 1.8 release, this function returns 0.</li>
* <li>Let \c root denote a valid HDF5 group identifier that refers to the
* root group of an HDF5 file, and let \c lapl denote a valid link
* access property list identifier. A call to H5Lexists() with
* arguments c root, \c "/", and \c lapl returns a positive value;
* in other words, \Code{H5Lexists(root, "/", lapl)} returns a positive
- * value. In HDF5 version 1.8.16, this function returns 0.</li>
+ * value. In the HDF5 1.8 release, this function returns 0.</li>
* </ol>
* Note that the function accepts link names and path names. This is
* potentially misleading to callers, and we plan to separate the
diff --git a/src/H5MF.c b/src/H5MF.c
index 4bd32a8..25c29b4 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -650,8 +650,10 @@ H5MF__add_sect(H5F_t *f, H5FD_mem_t alloc_type, H5FS_t *fspace, H5MF_free_sectio
H5AC_set_ring(fsm_ring, &orig_ring);
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: adding node, node->sect_info.addr = %a, node->sect_info.size = %Hu\n", __func__,
- node->sect_info.addr, node->sect_info.size);
+ HDfprintf(stderr,
+ "%s: adding node, node->sect_info.addr = %" PRIuHADDR ", node->sect_info.size = %" PRIuHSIZE
+ "\n",
+ __func__, node->sect_info.addr, node->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Add the section */
if (H5FS_sect_add(f, fspace, (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata) < 0)
@@ -703,7 +705,7 @@ H5MF__find_sect(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5FS_t *fspace, h
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "error locating free space in file")
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section found = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: section found = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Check for actually finding section */
@@ -731,7 +733,7 @@ H5MF__find_sect(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5FS_t *fspace, h
node->sect_info.size -= size;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: re-adding node, node->sect_info.size = %Hu\n", __func__,
+ HDfprintf(stderr, "%s: re-adding node, node->sect_info.size = %" PRIuHSIZE "\n", __func__,
node->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
@@ -775,7 +777,7 @@ H5MF_alloc(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
FUNC_ENTER_NOAPI_TAG(H5AC__FREESPACE_TAG, HADDR_UNDEF)
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", __func__, (unsigned)alloc_type, size);
+ HDfprintf(stderr, "%s: alloc_type = %u, size = %" PRIuHSIZE "\n", __func__, (unsigned)alloc_type, size);
#endif /* H5MF_ALLOC_DEBUG */
/* check arguments */
@@ -848,7 +850,8 @@ done:
H5AC_set_ring(orig_ring, NULL);
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", __func__, ret_value, size);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %" PRIuHADDR ", size = %" PRIuHSIZE "\n", __func__, ret_value,
+ size);
#endif /* H5MF_ALLOC_DEBUG */
#ifdef H5MF_ALLOC_DEBUG_DUMP
H5MF__sects_dump(f, stderr);
@@ -888,7 +891,7 @@ H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
FUNC_ENTER_STATIC
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", __func__, (unsigned)alloc_type, size);
+ HDfprintf(stderr, "%s: alloc_type = %u, size = %" PRIuHSIZE "\n", __func__, (unsigned)alloc_type, size);
#endif /* H5MF_ALLOC_DEBUG */
H5MF__alloc_to_fs_type(f->shared, alloc_type, size, &ptype);
@@ -985,7 +988,8 @@ H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
done:
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", __func__, ret_value, size);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %" PRIuHADDR ", size = %" PRIuHSIZE "\n", __func__, ret_value,
+ size);
#endif /* H5MF_ALLOC_DEBUG */
#ifdef H5MF_ALLOC_DEBUG_DUMP
H5MF__sects_dump(f, stderr);
@@ -1031,7 +1035,7 @@ H5MF_alloc_tmp(H5F_t *f, hsize_t size)
FUNC_ENTER_NOAPI(HADDR_UNDEF)
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: size = %Hu\n", __func__, size);
+ HDfprintf(stderr, "%s: size = %" PRIuHSIZE "\n", __func__, size);
#endif /* H5MF_ALLOC_DEBUG */
/* check args */
@@ -1083,8 +1087,8 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
FUNC_ENTER_NOAPI_TAG(H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", __func__,
- (unsigned)alloc_type, addr, size);
+ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %" PRIuHADDR ", size = %" PRIuHSIZE "\n",
+ __func__, (unsigned)alloc_type, addr, size);
#endif /* H5MF_ALLOC_DEBUG */
/* check arguments */
@@ -1133,7 +1137,7 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
* space is at the end of the file
*/
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: fs_addr = %a\n", __func__, f->shared->fs_addr[fs_type]);
+ HDfprintf(stderr, "%s: fs_addr = %" PRIuHADDR "\n", __func__, f->shared->fs_addr[fs_type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
if (!H5F_addr_defined(f->shared->fs_addr[fs_type])) {
htri_t status; /* "can absorb" status for section into */
@@ -1149,8 +1153,9 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
HGOTO_DONE(SUCCEED)
else if (size < f->shared->fs_threshold) {
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: dropping addr = %a, size = %Hu, on the floor!\n", __func__, addr,
- size);
+ HDfprintf(stderr,
+ "%s: dropping addr = %" PRIuHADDR ", size = %" PRIuHSIZE ", on the floor!\n",
+ __func__, addr, size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
HGOTO_DONE(SUCCEED)
} /* end else-if */
@@ -1167,7 +1172,8 @@ H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
*/
if (f->shared->fs_state[fs_type] == H5F_FS_STATE_DELETING || !H5F_HAVE_FREE_SPACE_MANAGER(f)) {
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: dropping addr = %a, size = %Hu, on the floor!\n", __func__, addr, size);
+ HDfprintf(stderr, "%s: dropping addr = %" PRIuHADDR ", size = %" PRIuHSIZE ", on the floor!\n",
+ __func__, addr, size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
HGOTO_DONE(SUCCEED)
} /* end if */
@@ -1276,7 +1282,9 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi
FUNC_ENTER_NOAPI_TAG(H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_requested = %Hu\n",
+ HDfprintf(stderr,
+ "%s: Entering: alloc_type = %u, addr = %" PRIuHADDR ", size = %" PRIuHSIZE
+ ", extra_requested = %" PRIuHSIZE "\n",
__func__, (unsigned)alloc_type, addr, size, extra_requested);
#endif /* H5MF_ALLOC_DEBUG */
@@ -1329,7 +1337,7 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi
if ((ret_value = H5F__try_extend(f, map_type, end, extra_requested + frag_size)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending file")
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: extended = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: extended = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* If extending at EOA succeeds: */
@@ -1367,7 +1375,7 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending aggregation block")
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: H5MF__aggr_try_extend = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: H5MF__aggr_try_extend = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
@@ -1393,7 +1401,7 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL,
"error extending block in free space manager")
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Try to H5FS_sect_try_extend = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: Try to H5FS_sect_try_extend = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
@@ -1404,7 +1412,7 @@ H5MF_try_extend(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size, hsi
if (frag_size <= H5F_PGEND_META_THRES(f) && extra_requested <= frag_size)
ret_value = TRUE;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Try to extend into the page end threshold = %t\n", __func__,
+ HDfprintf(stderr, "%s: Try to extend into the page end threshold = %d\n", __func__,
ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
@@ -1417,7 +1425,7 @@ done:
H5AC_set_ring(orig_ring, NULL);
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG */
#ifdef H5MF_ALLOC_DEBUG_DUMP
H5MF__sects_dump(f, stderr);
@@ -1452,8 +1460,8 @@ H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, haddr_t addr, hsize_t size)
FUNC_ENTER_NOAPI_TAG(H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
- HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", __func__,
- (unsigned)alloc_type, addr, size);
+ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %" PRIuHADDR ", size = %" PRIuHSIZE "\n",
+ __func__, (unsigned)alloc_type, addr, size);
#endif /* H5MF_ALLOC_DEBUG */
/* check arguments */
@@ -1589,8 +1597,9 @@ H5MF__close_delete_fstype(H5F_t *f, H5F_mem_page_t type)
HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Check 1.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %a\n", __func__,
- (unsigned)type, f->shared->fs_man[type], (unsigned)type, f->shared->fs_addr[type]);
+ HDfprintf(stderr, "%s: Check 1.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %" PRIuHADDR "\n",
+ __func__, (unsigned)type, (void *)f->shared->fs_man[type], (unsigned)type,
+ f->shared->fs_addr[type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* If the free space manager for this type is open, close it */
@@ -1599,8 +1608,9 @@ H5MF__close_delete_fstype(H5F_t *f, H5F_mem_page_t type)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close the free space manager")
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Check 2.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %a\n", __func__,
- (unsigned)type, f->shared->fs_man[type], (unsigned)type, f->shared->fs_addr[type]);
+ HDfprintf(stderr, "%s: Check 2.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %" PRIuHADDR "\n",
+ __func__, (unsigned)type, (void *)f->shared->fs_man[type], (unsigned)type,
+ f->shared->fs_addr[type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* If there is free space manager info for this type, delete it */
diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c
index 8ffc5c8..78bf620 100644
--- a/src/H5MFaggr.c
+++ b/src/H5MFaggr.c
@@ -92,7 +92,7 @@ H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
FUNC_ENTER_NOAPI(HADDR_UNDEF)
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", __func__, (unsigned)alloc_type, size);
+ HDfprintf(stderr, "%s: alloc_type = %u, size = %" PRIuHSIZE "\n", __func__, (unsigned)alloc_type, size);
#endif /* H5MF_AGGR_DEBUG */
/* check arguments */
@@ -120,7 +120,8 @@ H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size)
done:
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", __func__, ret_value, size);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %" PRIuHADDR ", size = %" PRIuHSIZE "\n", __func__, ret_value,
+ size);
#endif /* H5MF_AGGR_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
@@ -150,7 +151,7 @@ H5MF__aggr_alloc(H5F_t *f, H5F_blk_aggr_t *aggr, H5F_blk_aggr_t *other_aggr, H5F
FUNC_ENTER_STATIC
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: type = %u, size = %Hu\n", __func__, (unsigned)type, size);
+ HDfprintf(stderr, "%s: type = %u, size = %" PRIuHSIZE "\n", __func__, (unsigned)type, size);
#endif /* H5MF_AGGR_DEBUG */
/* check args */
@@ -199,8 +200,8 @@ H5MF__aggr_alloc(H5F_t *f, H5F_blk_aggr_t *aggr, H5F_blk_aggr_t *other_aggr, H5F
H5FD_mem_t alloc_type, other_alloc_type; /* Current aggregator & 'other' aggregator types */
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: aggr = {%a, %Hu, %Hu}\n", __func__, aggr->addr, aggr->tot_size,
- aggr->size);
+ HDfprintf(stderr, "%s: aggr = {%" PRIuHADDR ", %" PRIuHSIZE ", %" PRIuHSIZE "}\n", __func__,
+ aggr->addr, aggr->tot_size, aggr->size);
#endif /* H5MF_AGGR_DEBUG */
/* Turn off alignment if allocation < threshold */
@@ -388,7 +389,7 @@ H5MF__aggr_alloc(H5F_t *f, H5F_blk_aggr_t *aggr, H5F_blk_aggr_t *other_aggr, H5F
done:
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: ret_value = %a\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: ret_value = %" PRIuHADDR "\n", __func__, ret_value);
#endif /* H5MF_AGGR_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5MF__aggr_alloc() */
@@ -537,8 +538,11 @@ done:
if (H5F_addr_eq((sect->sect_info.addr + sect->sect_info.size), aggr->addr) ||
H5F_addr_eq((aggr->addr + aggr->size), sect->sect_info.addr)) {
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: section {%a, %Hu} adjoins aggr = {%a, %Hu}\n", "H5MF__aggr_can_absorb",
- sect->sect_info.addr, sect->sect_info.size, aggr->addr, aggr->size);
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "} adjoins aggr = {%" PRIuHADDR
+ ", %" PRIuHSIZE "}\n",
+ "H5MF__aggr_can_absorb", sect->sect_info.addr, sect->sect_info.size, aggr->addr,
+ aggr->size);
#endif /* H5MF_AGGR_DEBUG */
/* Check if aggregator would get too large and should be absorbed into section */
if ((aggr->size + sect->sect_info.size) >= aggr->alloc_size)
@@ -587,7 +591,9 @@ done:
/* Check if the section adjoins the beginning or end of the aggregator */
if (H5F_addr_eq((sect->sect_info.addr + sect->sect_info.size), aggr->addr)) {
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: aggr {%a, %Hu} adjoins front of section = {%a, %Hu}\n",
+ HDfprintf(stderr,
+ "%s: aggr {%" PRIuHADDR ", %" PRIuHSIZE "} adjoins front of section = {%" PRIuHADDR
+ ", %" PRIuHSIZE "}\n",
"H5MF__aggr_absorb", aggr->addr, aggr->size, sect->sect_info.addr,
sect->sect_info.size);
#endif /* H5MF_AGGR_DEBUG */
@@ -599,7 +605,9 @@ done:
HDassert(H5F_addr_eq((aggr->addr + aggr->size), sect->sect_info.addr));
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: aggr {%a, %Hu} adjoins end of section = {%a, %Hu}\n",
+ HDfprintf(stderr,
+ "%s: aggr {%" PRIuHADDR ", %" PRIuHSIZE "} adjoins end of section = {%" PRIuHADDR
+ ", %" PRIuHSIZE "}\n",
"H5MF__aggr_absorb", aggr->addr, aggr->size, sect->sect_info.addr,
sect->sect_info.size);
#endif /* H5MF_AGGR_DEBUG */
@@ -617,7 +625,9 @@ done:
/* Check if the section adjoins the beginning or end of the aggregator */
if (H5F_addr_eq((sect->sect_info.addr + sect->sect_info.size), aggr->addr)) {
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: section {%a, %Hu} adjoins front of aggr = {%a, %Hu}\n",
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "} adjoins front of aggr = {%" PRIuHADDR
+ ", %" PRIuHSIZE "}\n",
"H5MF__aggr_absorb", sect->sect_info.addr, sect->sect_info.size, aggr->addr,
aggr->size);
#endif /* H5MF_AGGR_DEBUG */
@@ -635,7 +645,9 @@ done:
HDassert(H5F_addr_eq((aggr->addr + aggr->size), sect->sect_info.addr));
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: section {%a, %Hu} adjoins end of aggr = {%a, %Hu}\n",
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "} adjoins end of aggr = {%" PRIuHADDR
+ ", %" PRIuHSIZE "}\n",
"H5MF__aggr_absorb", sect->sect_info.addr, sect->sect_info.size, aggr->addr,
aggr->size);
#endif /* H5MF_AGGR_DEBUG */
@@ -723,7 +735,8 @@ done:
tmp_addr = aggr->addr;
tmp_size = aggr->size;
#ifdef H5MF_AGGR_DEBUG
- HDfprintf(stderr, "%s: tmp_addr = %a, tmp_size = %Hu\n", __func__, tmp_addr, tmp_size);
+ HDfprintf(stderr, "%s: tmp_addr = %" PRIuHADDR ", tmp_size = %" PRIuHSIZE "\n", __func__,
+ tmp_addr, tmp_size);
#endif /* H5MF_AGGR_DEBUG */
/* Reset aggregator block information */
diff --git a/src/H5MFsection.c b/src/H5MFsection.c
index 69b2ca0..13675f5 100644
--- a/src/H5MFsection.c
+++ b/src/H5MFsection.c
@@ -472,8 +472,9 @@ H5MF__sect_simple_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
/* Set the shrinking type */
udata->shrink = H5MF_SHRINK_EOA;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section {%a, %Hu}, shrinks file, eoa = %a\n", __func__, sect->sect_info.addr,
- sect->sect_info.size, eoa);
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "}, shrinks file, eoa = %" PRIuHADDR "\n",
+ __func__, sect->sect_info.addr, sect->sect_info.size, eoa);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Indicate shrinking can occur */
@@ -496,8 +497,9 @@ H5MF__sect_simple_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
/* Set the aggregator to operate on */
udata->aggr = &(udata->f->shared->meta_aggr);
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section {%a, %Hu}, adjoins metadata aggregator\n", __func__,
- sect->sect_info.addr, sect->sect_info.size);
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "}, adjoins metadata aggregator\n",
+ __func__, sect->sect_info.addr, sect->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Indicate shrinking can occur */
@@ -517,8 +519,9 @@ H5MF__sect_simple_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
/* Set the aggregator to operate on */
udata->aggr = &(udata->f->shared->sdata_aggr);
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section {%a, %Hu}, adjoins small data aggregator\n", __func__,
- sect->sect_info.addr, sect->sect_info.size);
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "}, adjoins small data aggregator\n",
+ __func__, sect->sect_info.addr, sect->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Indicate shrinking can occur */
@@ -625,8 +628,8 @@ H5MF__sect_small_add(H5FS_section_info_t **_sect, unsigned *flags, void *_udata)
FUNC_ENTER_STATIC
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Entering, section {%a, %Hu}\n", __func__, (*sect)->sect_info.addr,
- (*sect)->sect_info.size);
+ HDfprintf(stderr, "%s: Entering, section {%" PRIuHADDR ", %" PRIuHSIZE "}\n", __func__,
+ (*sect)->sect_info.addr, (*sect)->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Do not adjust the section raw data or global heap data */
@@ -653,8 +656,8 @@ H5MF__sect_small_add(H5FS_section_info_t **_sect, unsigned *flags, void *_udata)
else if (prem <= H5F_PGEND_META_THRES(udata->f)) {
(*sect)->sect_info.size += prem;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section is adjusted {%a, %Hu}\n", __func__, (*sect)->sect_info.addr,
- (*sect)->sect_info.size);
+ HDfprintf(stderr, "%s: section is adjusted {%" PRIuHADDR ", %" PRIuHSIZE "}\n", __func__,
+ (*sect)->sect_info.addr, (*sect)->sect_info.size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
@@ -702,7 +705,7 @@ H5MF__sect_small_can_merge(const H5FS_section_info_t *_sect1, const H5FS_section
ret_value = FALSE;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
FUNC_LEAVE_NOAPI(ret_value)
@@ -806,7 +809,7 @@ H5MF__sect_large_can_merge(const H5FS_section_info_t *_sect1, const H5FS_section
ret_value = H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr);
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", __func__, ret_value);
+ HDfprintf(stderr, "%s: Leaving: ret_value = %d\n", __func__, ret_value);
#endif /* H5MF_ALLOC_DEBUG_MORE */
FUNC_LEAVE_NOAPI(ret_value)
@@ -894,8 +897,9 @@ H5MF__sect_large_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
/* Set the shrinking type */
udata->shrink = H5MF_SHRINK_EOA;
#ifdef H5MF_ALLOC_DEBUG_MORE
- HDfprintf(stderr, "%s: section {%a, %Hu}, shrinks file, eoa = %a\n", __func__, sect->sect_info.addr,
- sect->sect_info.size, eoa);
+ HDfprintf(stderr,
+ "%s: section {%" PRIuHADDR ", %" PRIuHSIZE "}, shrinks file, eoa = %" PRIuHADDR "\n",
+ __func__, sect->sect_info.addr, sect->sect_info.size, eoa);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Indicate shrinking can occur */
diff --git a/src/H5MP.c b/src/H5MP.c
deleted file mode 100644
index 397d26b..0000000
--- a/src/H5MP.c
+++ /dev/null
@@ -1,443 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*-------------------------------------------------------------------------
- *
- * Created: H5MP.c
- * May 2 2005
- * Quincey Koziol
- *
- * Purpose: Implements memory pools. (Similar to Apache's APR
- * memory pools)
- *
- * Please see the documentation in:
- * doc/html/TechNotes/MemoryPools.html for a full description
- * of how they work, etc.
- *
- *-------------------------------------------------------------------------
- */
-
-#include "H5MPmodule.h" /* This source code file is part of the H5MP module */
-
-/* Private headers */
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5MMprivate.h" /* Memory management */
-#include "H5MPpkg.h" /* Memory Pools */
-
-/****************/
-/* Local Macros */
-/****************/
-
-/* Minimum sized block */
-#define H5MP_MIN_BLOCK (H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) + H5MP_BLOCK_ALIGNMENT)
-
-/* First block in page */
-#define H5MP_PAGE_FIRST_BLOCK(p) \
- (H5MP_page_blk_t *)((void *)((unsigned char *)(p) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
-
-/******************/
-/* Local Typedefs */
-/******************/
-
-/********************/
-/* Local Prototypes */
-/********************/
-
-/********************************/
-/* Package Variable Definitions */
-/********************************/
-
-/********************/
-/* Static Variables */
-/********************/
-
-/* Declare a free list to manage the H5MP_pool_t struct */
-H5FL_DEFINE(H5MP_pool_t);
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_create
- *
- * Purpose: Create a new memory pool
- *
- * Return: Pointer to the memory pool "header" on success/NULL on failure
- *
- * Programmer: Quincey Koziol
- * May 2 2005
- *
- *-------------------------------------------------------------------------
- */
-H5MP_pool_t *
-H5MP_create(size_t page_size, unsigned flags)
-{
- H5MP_pool_t *mp = NULL; /* New memory pool header */
- H5MP_pool_t *ret_value = NULL; /* Return value */
-
- FUNC_ENTER_NOAPI(NULL)
-
- /* Allocate space for the pool header */
- if (NULL == (mp = H5FL_MALLOC(H5MP_pool_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for memory pool header")
-
- /* Assign information */
- mp->page_size = H5MP_BLOCK_ALIGN(page_size);
- mp->flags = flags;
-
- /* Initialize information */
- mp->free_size = 0;
- mp->first = NULL;
- mp->max_size = mp->page_size - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t));
-
- /* Create factory for pool pages */
- if (NULL == (mp->page_fac = H5FL_fac_init(page_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, NULL, "can't create page factory")
-
- /* Set return value */
- ret_value = mp;
-
-done:
- if (NULL == ret_value && mp)
- if (H5MP_close(mp) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTFREE, NULL, "unable to free memory pool header")
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MP_create() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP__new_page
- *
- * Purpose: Allocate new page for a memory pool
- *
- * Return: Pointer to the page allocated on success/NULL on failure
- *
- * Programmer: Quincey Koziol
- * May 4 2005
- *
- *-------------------------------------------------------------------------
- */
-static H5MP_page_t *
-H5MP__new_page(H5MP_pool_t *mp, size_t page_size)
-{
- H5MP_page_t * new_page; /* New page created */
- H5MP_page_blk_t *first_blk; /* Pointer to first block in page */
- H5MP_page_t * ret_value = NULL; /* Return value */
-
- FUNC_ENTER_STATIC
-
- /* Sanity check */
- HDassert(mp);
- HDassert(page_size >= mp->page_size);
-
- /* Allocate page */
- if (page_size > mp->page_size) {
- if (NULL == (new_page = (H5MP_page_t *)H5MM_malloc(page_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for page")
- new_page->free_size = page_size - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t));
- new_page->fac_alloc = FALSE;
- } /* end if */
- else {
- if (NULL == (new_page = (H5MP_page_t *)H5FL_FAC_MALLOC(mp->page_fac)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for page")
- new_page->free_size = mp->max_size;
- new_page->fac_alloc = TRUE;
- } /* end else */
-
- /* Initialize page information */
- first_blk = H5MP_PAGE_FIRST_BLOCK(new_page);
- first_blk->size = new_page->free_size;
- first_blk->page = new_page;
- first_blk->is_free = TRUE;
- first_blk->prev = NULL;
- first_blk->next = NULL;
-
- /* Insert into page list */
- new_page->prev = NULL;
- new_page->next = mp->first;
- if (mp->first)
- mp->first->prev = new_page;
- mp->first = new_page;
-
- /* Account for new free space */
- new_page->free_blk = first_blk;
- mp->free_size += new_page->free_size;
-
- /* Assign return value */
- ret_value = new_page;
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MP__new_page() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_malloc
- *
- * Purpose: Allocate space in a memory pool
- *
- * Return: Pointer to the space allocated on success/NULL on failure
- *
- * Programmer: Quincey Koziol
- * May 2 2005
- *
- *-------------------------------------------------------------------------
- */
-void *
-H5MP_malloc(H5MP_pool_t *mp, size_t request)
-{
- H5MP_page_t * alloc_page = NULL; /* Page to allocate space from */
- H5MP_page_blk_t *alloc_free; /* Pointer to free space in page */
- size_t needed; /* Size requested, plus block header and alignment */
- void * ret_value = NULL; /* Return value */
-
- FUNC_ENTER_NOAPI(NULL)
-
- /* Sanity check */
- HDassert(mp);
- HDassert(request > 0);
-
- /* Compute actual size needed */
- needed = H5MP_BLOCK_ALIGN(request) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t));
-
- /* See if the request can be handled by existing free space */
- if (needed <= mp->free_size) {
- size_t pool_free_avail; /* Amount of free space possibly available in pool */
-
- /* Locate page with enough free space */
- alloc_page = mp->first;
- pool_free_avail = mp->free_size;
- while (alloc_page && pool_free_avail >= needed) {
- /* If we found a page with enough free space, search for large
- * enough free block on that page */
- if (alloc_page->free_size >= needed) {
- size_t page_free_avail; /* Amount of free space possibly available */
-
- /* Locate large enough block */
- alloc_free = alloc_page->free_blk;
- page_free_avail = alloc_page->free_size;
- while (alloc_free && page_free_avail >= needed) {
- if (alloc_free->is_free) {
- /* If we found a large enough block, leave now */
- if (alloc_free->size >= needed)
- goto found; /* Needed to escape double "while" loop */
-
- /* Decrement amount of potential space left */
- page_free_avail -= alloc_free->size;
- } /* end if */
-
- /* Go to next block */
- alloc_free = alloc_free->next;
- } /* end while */
- } /* end if */
-
- /* Decrement amount of potential space left */
- pool_free_avail -= alloc_page->free_size;
-
- /* Go to next page */
- alloc_page = alloc_page->next;
- } /* end while */
- } /* end if */
-
- {
- size_t page_size; /* Size of page needed */
-
- /* Check if the request is too large for a standard page */
- page_size =
- (needed > mp->max_size) ? (needed + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))) : mp->page_size;
-
- /* Allocate new page */
- if (NULL == (alloc_page = H5MP__new_page(mp, page_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for page")
-
- /* Set the block to allocate from */
- alloc_free = alloc_page->free_blk;
- } /* end block */
-
- /* Allocate space in page */
-found:
-
- /* Sanity check */
- HDassert(alloc_page);
- HDassert(alloc_free);
-
- /* Check if we can subdivide the free space */
- if (alloc_free->size > (needed + H5MP_MIN_BLOCK)) {
- H5MP_page_blk_t *new_free; /* New free block created */
-
- /* Carve out new free block after block to allocate */
- new_free = (H5MP_page_blk_t *)((void *)(((unsigned char *)alloc_free) + needed));
-
- /* Link into existing lists */
- new_free->next = alloc_free->next;
- if (alloc_free->next)
- alloc_free->next->prev = new_free;
- new_free->prev = alloc_free;
- alloc_free->next = new_free;
-
- /* Set blocks' information */
- new_free->size = alloc_free->size - needed;
- new_free->is_free = TRUE;
- new_free->page = alloc_free->page;
- alloc_free->size = needed;
- alloc_free->is_free = FALSE;
- } /* end if */
- else {
- /* Use whole free space block for new block */
- alloc_free->is_free = FALSE;
- } /* end else */
-
- /* Update page & pool's free size information */
- alloc_page->free_size -= alloc_free->size;
- if (alloc_page->free_blk == alloc_free)
- alloc_page->free_blk = alloc_free->next;
- mp->free_size -= alloc_free->size;
-
- /* Set new space pointer for the return value */
- ret_value = ((unsigned char *)alloc_free) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t));
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MP_malloc() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_free
- *
- * Purpose: Release space in a memory pool
- *
- * Return: NULL on success/NULL on failure
- *
- * Programmer: Quincey Koziol
- * May 3 2005
- *
- * Note: Should we release pages that have no used blocks?
- *
- *-------------------------------------------------------------------------
- */
-void *
-H5MP_free(H5MP_pool_t *mp, void *spc)
-{
- H5MP_page_blk_t *spc_blk; /* Block for space to free */
- H5MP_page_t * spc_page; /* Page containing block to free */
- void * ret_value = NULL; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Sanity check */
- HDassert(mp);
- HDassert(spc);
-
- /* Get block header for space to free */
- spc_blk =
- (H5MP_page_blk_t *)((void *)(((unsigned char *)spc) - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t))));
-
- /* Mark block as free */
- HDassert(spc_blk->is_free == FALSE);
- spc_blk->is_free = TRUE;
-
- /* Add it's space to the amount of free space in the page & pool */
- spc_page = spc_blk->page;
- spc_page->free_size += spc_blk->size;
- mp->free_size += spc_blk->size;
-
- /* Move page with newly freed space to front of list of pages in pool */
- if (spc_page != mp->first) {
- /* Remove page from list */
- spc_page->prev->next = spc_page->next;
- if (spc_page->next)
- spc_page->next->prev = spc_page->prev;
-
- /* Insert page at beginning of list */
- spc_page->prev = NULL;
- spc_page->next = mp->first;
- mp->first->prev = spc_page;
- mp->first = spc_page;
- } /* end if */
-
- /* Check if block can be merged with free space after it on page */
- if (spc_blk->next != NULL) {
- H5MP_page_blk_t *next_blk; /* Block following space to free */
-
- next_blk = spc_blk->next;
- HDassert(next_blk->prev == spc_blk);
- if (next_blk->is_free) {
- spc_blk->size += next_blk->size;
- spc_blk->next = next_blk->next;
- } /* end if */
- } /* end if */
-
- /* Check if block can be merged with free space before it on page */
- if (spc_blk->prev != NULL) {
- H5MP_page_blk_t *prev_blk; /* Block before space to free */
-
- prev_blk = spc_blk->prev;
- HDassert(prev_blk->next == spc_blk);
- if (prev_blk->is_free) {
- prev_blk->size += spc_blk->size;
- prev_blk->next = spc_blk->next;
- } /* end if */
- } /* end if */
-
- /* Check if the block freed becomes the first free block on the page */
- if (spc_page->free_blk == NULL || spc_blk < spc_page->free_blk)
- spc_page->free_blk = spc_blk;
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MP_free() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_close
- *
- * Purpose: Release all memory for a pool and destroy pool
- *
- * Return: Non-negative on success/negative on failure
- *
- * Programmer: Quincey Koziol
- * May 3 2005
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MP_close(H5MP_pool_t *mp)
-{
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI(FAIL)
-
- /* Release memory for pool pages */
- if (mp->first != NULL) {
- H5MP_page_t *page, *next_page; /* Pointer to pages in pool */
-
- /* Iterate through pages, releasing them */
- page = mp->first;
- while (page) {
- next_page = page->next;
-
- /* Free the page appropriately */
- if (page->fac_alloc)
- page = (H5MP_page_t *)H5FL_FAC_FREE(mp->page_fac, page);
- else
- page = (H5MP_page_t *)H5MM_xfree(page);
-
- page = next_page;
- } /* end while */
- } /* end if */
-
- /* Release page factory */
- if (mp->page_fac)
- if (H5FL_fac_term(mp->page_fac) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't destroy page factory")
-
-done:
- /* Free the memory pool itself */
- mp = H5FL_FREE(H5MP_pool_t, mp);
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MP_close() */
diff --git a/src/H5MPmodule.h b/src/H5MPmodule.h
deleted file mode 100644
index 8e34598..0000000
--- a/src/H5MPmodule.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Programmer: Quincey Koziol
- * Saturday, September 12, 2015
- *
- * Purpose: This file contains declarations which define macros for the
- * H5MP package. Including this header means that the source file
- * is part of the H5MP package.
- */
-#ifndef H5MPmodule_H
-#define H5MPmodule_H
-
-/* Define the proper control macros for the generic FUNC_ENTER/LEAVE and error
- * reporting macros.
- */
-#define H5MP_MODULE
-#define H5_MY_PKG H5MP
-#define H5_MY_PKG_ERR H5E_RESOURCE
-#define H5_MY_PKG_INIT NO
-
-#endif /* H5MPmodule_H */
diff --git a/src/H5MPpkg.h b/src/H5MPpkg.h
deleted file mode 100644
index 64c5293..0000000
--- a/src/H5MPpkg.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- * Programmer: Quincey Koziol
- * Monday, May 2, 2005
- *
- * Purpose: This file contains declarations which are visible only within
- * the H5MP package. Source files outside the H5MP package should
- * include H5MPprivate.h instead.
- */
-#if !(defined H5MP_FRIEND || defined H5MP_MODULE)
-#error "Do not include this file outside the H5MP package!"
-#endif
-
-#ifndef H5MPpkg_H
-#define H5MPpkg_H
-
-/* Get package's private header */
-#include "H5MPprivate.h" /* Memory Pools */
-
-/* Other private headers needed by this file */
-#include "H5FLprivate.h" /* Free Lists */
-
-/**************************/
-/* Package Private Macros */
-/**************************/
-
-/* Alignment macros */
-/* (Ideas from Apache APR :-) */
-
-/* Default alignment necessary */
-#define H5MP_BLOCK_ALIGNMENT 8
-
-/* General alignment macro */
-/* (this only works for aligning to power of 2 boundary) */
-#define H5MP_ALIGN(x, a) (((x) + ((size_t)(a)) - 1) & ~(((size_t)(a)) - 1))
-
-/* Default alignment */
-#define H5MP_BLOCK_ALIGN(x) H5MP_ALIGN(x, H5MP_BLOCK_ALIGNMENT)
-
-/****************************/
-/* Package Private Typedefs */
-/****************************/
-
-/* Free block in pool */
-typedef struct H5MP_page_blk_t {
- size_t size; /* Size of block (includes this H5MP_page_blk_t info) */
- unsigned is_free : 1; /* Flag to indicate the block is free */
- struct H5MP_page_t * page; /* Pointer to page block is located in */
- struct H5MP_page_blk_t *prev; /* Pointer to previous block in page */
- struct H5MP_page_blk_t *next; /* Pointer to next block in page */
-} H5MP_page_blk_t;
-
-/* Memory pool page */
-typedef struct H5MP_page_t {
- size_t free_size; /* Total amount of free space in page */
- unsigned fac_alloc : 1; /* Flag to indicate the page was allocated by the pool's factory */
- H5MP_page_blk_t * free_blk; /* Pointer to first free block in page */
- struct H5MP_page_t *next; /* Pointer to next page in pool */
- struct H5MP_page_t *prev; /* Pointer to previous page in pool */
-} H5MP_page_t;
-
-/* Memory pool header */
-struct H5MP_pool_t {
- H5FL_fac_head_t *page_fac; /* Free-list factory for pages */
- size_t page_size; /* Page size for pool */
- size_t free_size; /* Total amount of free space in pool */
- size_t max_size; /* Maximum block that will fit in a standard page */
- H5MP_page_t * first; /* Pointer to first page in pool */
- unsigned flags; /* Bit flags for pool settings */
-};
-
-/*****************************************/
-/* Package Private Variable Declarations */
-/*****************************************/
-
-/******************************/
-/* Package Private Prototypes */
-/******************************/
-#ifdef H5MP_TESTING
-H5_DLL herr_t H5MP_get_pool_free_size(const H5MP_pool_t *mp, size_t *free_size);
-H5_DLL htri_t H5MP_pool_is_free_size_correct(const H5MP_pool_t *mp);
-H5_DLL herr_t H5MP_get_pool_first_page(const H5MP_pool_t *mp, H5MP_page_t **page);
-H5_DLL herr_t H5MP_get_page_free_size(const H5MP_page_t *mp, size_t *page);
-H5_DLL herr_t H5MP_get_page_next_page(const H5MP_page_t *page, H5MP_page_t **next_page);
-#endif /* H5MP_TESTING */
-
-#endif /* H5MPpkg_H */
diff --git a/src/H5MPprivate.h b/src/H5MPprivate.h
deleted file mode 100644
index 2b06650..0000000
--- a/src/H5MPprivate.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*-------------------------------------------------------------------------
- *
- * Created: H5MPprivate.h
- * May 2 2005
- * Quincey Koziol
- *
- * Purpose: Private header for memory pool routines.
- *
- *-------------------------------------------------------------------------
- */
-
-#ifndef H5MPprivate_H
-#define H5MPprivate_H
-
-/* Include package's public header (not yet) */
-/* #include "H5MPpublic.h" */
-
-/* Private headers needed by this file */
-
-/**************************/
-/* Library Private Macros */
-/**************************/
-
-/* Pool creation flags */
-/* Default settings */
-#define H5MP_FLG_DEFAULT 0
-#define H5MP_PAGE_SIZE_DEFAULT 4096 /* (bytes) */
-
-/****************************/
-/* Library Private Typedefs */
-/****************************/
-
-/* Memory pool header (defined in H5MPpkg.c) */
-typedef struct H5MP_pool_t H5MP_pool_t;
-
-/***************************************/
-/* Library-private Function Prototypes */
-/***************************************/
-H5_DLL H5MP_pool_t *H5MP_create(size_t page_size, unsigned flags);
-H5_DLL void * H5MP_malloc(H5MP_pool_t *mp, size_t request);
-H5_DLL void * H5MP_free(H5MP_pool_t *mp, void *spc);
-H5_DLL herr_t H5MP_close(H5MP_pool_t *mp);
-
-#endif /* H5MPprivate_H */
diff --git a/src/H5MPtest.c b/src/H5MPtest.c
deleted file mode 100644
index 27e7bbe..0000000
--- a/src/H5MPtest.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/* Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- * Purpose: Memory pool testing functions.
- */
-
-#include "H5MPmodule.h" /* This source code file is part of the H5MP module */
-#define H5MP_TESTING /*include H5MP testing funcs*/
-
-/* Private headers */
-#include "H5private.h" /* Generic Functions */
-#include "H5MPpkg.h" /* Memory Pools */
-#include "H5Eprivate.h" /* Error handling */
-
-/* Static Prototypes */
-
-/* Package variables */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_get_pool_free_size
- *
- * Purpose: Retrieve the total amount of free space in entire pool
- *
- * Return: Success: non-negative
- *
- * Failure: negative
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MP_get_pool_free_size(const H5MP_pool_t *mp, size_t *free_size)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments. */
- HDassert(mp);
- HDassert(free_size);
-
- /* Get memory pool's free space */
- *free_size = mp->free_size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MP_get_pool_free_size() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_get_pool_first_page
- *
- * Purpose: Retrieve the first page in a memory pool
- *
- * Return: Success: non-negative
- *
- * Failure: negative
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MP_get_pool_first_page(const H5MP_pool_t *mp, H5MP_page_t **page)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments. */
- HDassert(mp);
- HDassert(page);
-
- /* Get memory pool's first page */
- *page = mp->first;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MP_get_pool_first_page() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_pool_is_free_size_correct
- *
- * Purpose: Check that the free space reported in each page corresponds
- * to the free size in each page and that the free space in the
- * free blocks for a page corresponds with the free space for
- * the page.
- *
- * Return: Success: non-negative
- *
- * Failure: negative
- *
- * Programmer: Quincey Koziol
- * Wednesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-htri_t
-H5MP_pool_is_free_size_correct(const H5MP_pool_t *mp)
-{
- H5MP_page_t *page; /* Pointer to current page */
- size_t pool_free; /* Size of pages' free space */
- htri_t ret_value = TRUE; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments. */
- HDassert(mp);
-
- /* Iterate through pages, checking the free size & accumulating the
- * free space for all the pages */
- page = mp->first;
- pool_free = 0;
- while (page != NULL) {
- H5MP_page_blk_t *blk; /* Pointer to current free block */
- size_t page_free; /* Size of blocks on free list */
-
- /* Iterate through the blocks in page, accumulating free space */
- blk = (H5MP_page_blk_t *)((void *)((unsigned char *)page + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))));
- page_free = 0;
- while (blk != NULL) {
- if (blk->is_free)
- page_free += blk->size;
- blk = blk->next;
- } /* end while */
-
- /* Check that the free space from the blocks on the free list
- * corresponds to space in page */
- if (page_free != page->free_size)
- HGOTO_DONE(FALSE)
-
- /* Increment the amount of free space in pool */
- pool_free += page->free_size;
-
- /* Advance to next page */
- page = page->next;
- } /* end while */
-
- /* Check that the free space from the pages
- * corresponds to free space in pool */
- if (pool_free != mp->free_size)
- HGOTO_DONE(FALSE)
-
-done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* H5MP_pool_is_free_size_correct() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_get_page_free_size
- *
- * Purpose: Retrieve the amount of free space in given page
- *
- * Return: Success: non-negative
- *
- * Failure: negative
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MP_get_page_free_size(const H5MP_page_t *page, size_t *free_size)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments. */
- HDassert(page);
- HDassert(free_size);
-
- /* Get memory page's free space */
- *free_size = page->free_size;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MP_get_page_free_size() */
-
-/*-------------------------------------------------------------------------
- * Function: H5MP_get_page_next_page
- *
- * Purpose: Retrieve the next page in the pool
- *
- * Return: Success: non-negative
- *
- * Failure: negative
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MP_get_page_next_page(const H5MP_page_t *page, H5MP_page_t **next_page)
-{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Check arguments. */
- HDassert(page);
- HDassert(next_page);
-
- /* Get next memory page */
- *next_page = page->next;
-
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MP_get_page_next_page() */
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index ba47da3..c7586cc 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -346,7 +346,7 @@ H5O__cache_deserialize(const void *image, size_t len, void *_udata, hbool_t *dir
done:
/* Release the [possibly partially initialized] object header on errors */
if (!ret_value && oh)
- if (H5O__free(oh) < 0)
+ if (H5O__free(oh, FALSE) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, NULL, "unable to destroy object header data")
FUNC_LEAVE_NOAPI(ret_value)
@@ -639,7 +639,7 @@ H5O__cache_free_icr(void *_thing)
HDassert(oh->cache_info.type == H5AC_OHDR);
/* Destroy object header */
- if (H5O__free(oh) < 0)
+ if (H5O__free(oh, FALSE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "can't destroy object header")
done:
@@ -1242,7 +1242,7 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata)
/* Save the object header for later use in 'deserialize' callback */
udata->oh = oh;
- if (H5O__free(saved_oh) < 0)
+ if (H5O__free(saved_oh, FALSE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "can't destroy object header")
udata->free_oh = FALSE;
}
@@ -1255,7 +1255,7 @@ H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata)
done:
/* Release the [possibly partially initialized] object header on errors */
if (ret_value < 0 && oh)
- if (H5O__free(oh) < 0)
+ if (H5O__free(oh, FALSE) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header data")
FUNC_LEAVE_NOAPI(ret_value)
diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c
index 05dfc72..0b0bb55 100644
--- a/src/H5Ocopy.c
+++ b/src/H5Ocopy.c
@@ -33,7 +33,6 @@
#include "H5Aprivate.h" /* Attributes */
#include "H5CXprivate.h" /* API Contexts */
#include "H5Eprivate.h" /* Error handling */
-#include "H5ESprivate.h" /* Event Sets */
#include "H5FLprivate.h" /* Free lists */
#include "H5Iprivate.h" /* IDs */
#include "H5HGprivate.h" /* Global Heaps */
@@ -772,7 +771,7 @@ done:
/* Free destination object header on failure */
if (ret_value < 0) {
if (oh_dst && !inserted) {
- if (H5O__free(oh_dst) < 0)
+ if (H5O__free(oh_dst, TRUE) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header data")
if (H5O_loc_reset(oloc_dst) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to destroy object header data")
diff --git a/src/H5Ocopy_ref.c b/src/H5Ocopy_ref.c
index f1f8aaf..1cda3ea 100644
--- a/src/H5Ocopy_ref.c
+++ b/src/H5Ocopy_ref.c
@@ -288,21 +288,22 @@ H5O__copy_expand_ref_object2(H5O_loc_t *src_oloc, hid_t tid_src, const H5T_t *dt
size_t nbytes_src, H5O_loc_t *dst_oloc, H5G_loc_t *dst_root_loc, void *buf_dst,
size_t ref_count, H5O_copy_t *cpy_info)
{
- H5T_t * dt_mem = NULL; /* Memory datatype */
- H5T_t * dt_dst = NULL; /* Destination datatype */
- hid_t tid_mem = H5I_INVALID_HID; /* Datatype ID for memory datatype */
- hid_t tid_dst = H5I_INVALID_HID; /* Datatype ID for memory datatype */
- H5T_path_t *tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */
- size_t i; /* Local index variable */
- hbool_t reg_tid_src = (tid_src == H5I_INVALID_HID);
- hid_t dst_loc_id = H5I_INVALID_HID;
- void * conv_buf = NULL; /* Buffer for converting data */
- size_t conv_buf_size = 0; /* Buffer size */
- void * reclaim_buf = NULL; /* Buffer for reclaiming data */
- H5S_t * buf_space = NULL; /* Dataspace describing buffer */
- hsize_t buf_dim[1] = {ref_count}; /* Dimension for buffer */
- size_t token_size = H5F_SIZEOF_ADDR(src_oloc->file);
- herr_t ret_value = SUCCEED;
+ H5T_t * dt_mem = NULL; /* Memory datatype */
+ H5T_t * dt_dst = NULL; /* Destination datatype */
+ hid_t tid_mem = H5I_INVALID_HID; /* Datatype ID for memory datatype */
+ hid_t tid_dst = H5I_INVALID_HID; /* Datatype ID for memory datatype */
+ H5T_path_t * tpath_src_mem = NULL, *tpath_mem_dst = NULL; /* Datatype conversion paths */
+ size_t i; /* Local index variable */
+ hbool_t reg_tid_src = (tid_src == H5I_INVALID_HID);
+ hid_t dst_loc_id = H5I_INVALID_HID;
+ void * conv_buf = NULL; /* Buffer for converting data */
+ size_t conv_buf_size = 0; /* Buffer size */
+ void * reclaim_buf = NULL; /* Buffer for reclaiming data */
+ H5S_t * buf_space = NULL; /* Dataspace describing buffer */
+ hsize_t buf_dim[1] = {ref_count}; /* Dimension for buffer */
+ size_t token_size = H5F_SIZEOF_ADDR(src_oloc->file);
+ const unsigned char zeros[H5R_REF_BUF_SIZE] = {0};
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
@@ -353,29 +354,34 @@ H5O__copy_expand_ref_object2(H5O_loc_t *src_oloc, hid_t tid_src, const H5T_t *dt
/* Making equivalent references in the destination file */
for (i = 0; i < ref_count; i++) {
- H5R_ref_t * ref_ptr = (H5R_ref_t *)conv_buf;
- H5R_ref_priv_t *ref = (H5R_ref_priv_t *)&ref_ptr[i];
- H5O_token_t tmp_token = {0};
-
- /* Get src object address */
- if (H5R__get_obj_token(ref, &tmp_token, &token_size) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to get object token")
- if (H5VL_native_token_to_addr(src_oloc->file, H5I_FILE, tmp_token, &src_oloc->addr) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTUNSERIALIZE, FAIL, "can't deserialize object token into address")
-
- /* Attempt to copy object from source to destination file */
- if (H5O__copy_obj_by_ref(src_oloc, dst_oloc, dst_root_loc, cpy_info) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to copy object")
-
- /* Set dst object address */
- if (H5VL_native_addr_to_token(dst_oloc->file, H5I_FILE, dst_oloc->addr, &tmp_token) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "can't serialize address into object token")
- if (H5R__set_obj_token(ref, (const H5O_token_t *)&tmp_token, token_size) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to set object token")
- /* Do not set app_ref since references are released once the copy is done */
- if (H5R__set_loc_id(ref, dst_loc_id, TRUE, FALSE) < 0)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to set destination loc id")
- } /* end for */
+ H5R_ref_t * ref_ptr = (H5R_ref_t *)conv_buf;
+ H5R_ref_priv_t *ref = (H5R_ref_priv_t *)&ref_ptr[i];
+
+ /* Check for null reference - only expand reference if it is not null */
+ if (HDmemcmp(ref, zeros, H5R_REF_BUF_SIZE)) {
+ H5O_token_t tmp_token = {0};
+
+ /* Get src object address */
+ if (H5R__get_obj_token(ref, &tmp_token, &token_size) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to get object token")
+ if (H5VL_native_token_to_addr(src_oloc->file, H5I_FILE, tmp_token, &src_oloc->addr) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTUNSERIALIZE, FAIL,
+ "can't deserialize object token into address")
+
+ /* Attempt to copy object from source to destination file */
+ if (H5O__copy_obj_by_ref(src_oloc, dst_oloc, dst_root_loc, cpy_info) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to copy object")
+
+ /* Set dst object address */
+ if (H5VL_native_addr_to_token(dst_oloc->file, H5I_FILE, dst_oloc->addr, &tmp_token) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSERIALIZE, FAIL, "can't serialize address into object token")
+ if (H5R__set_obj_token(ref, (const H5O_token_t *)&tmp_token, token_size) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to set object token")
+ /* Do not set app_ref since references are released once the copy is done */
+ if (H5R__set_loc_id(ref, dst_loc_id, TRUE, FALSE) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "unable to set destination loc id")
+ } /* end if */
+ } /* end for */
/* Copy into another buffer, to reclaim memory later */
if (NULL == (reclaim_buf = H5FL_BLK_MALLOC(type_conv, conv_buf_size)))
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index fa49924..9af79f4 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -1731,7 +1731,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_NO_CLASS:
case H5T_NCLASSES:
default:
- HDsprintf(buf, "H5T_CLASS_%d", (int)(dt->shared->type));
+ HDsnprintf(buf, sizeof(buf), "H5T_CLASS_%d", (int)(dt->shared->type));
s = buf;
break;
} /* end switch */
@@ -1746,7 +1746,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
"Number of members:", dt->shared->u.compnd.nmembs);
for (i = 0; i < dt->shared->u.compnd.nmembs; i++) {
- HDsprintf(buf, "Member %u:", i);
+ HDsnprintf(buf, sizeof(buf), "Member %u:", i);
HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, buf, dt->shared->u.compnd.memb[i].name);
HDfprintf(stream, "%*s%-*s %lu\n", indent + 3, "", MAX(0, fwidth - 3),
"Byte offset:", (unsigned long)(dt->shared->u.compnd.memb[i].offset));
@@ -1759,7 +1759,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
"Number of members:", dt->shared->u.enumer.nmembs);
for (i = 0; i < dt->shared->u.enumer.nmembs; i++) {
- HDsprintf(buf, "Member %u:", i);
+ HDsnprintf(buf, sizeof(buf), "Member %u:", i);
HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, buf, dt->shared->u.enumer.name[i]);
HDfprintf(stream, "%*s%-*s 0x", indent, "", fwidth, "Raw bytes of value:");
for (k = 0; k < dt->shared->parent->shared->size; k++)
@@ -1799,13 +1799,14 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_CSET_RESERVED_13:
case H5T_CSET_RESERVED_14:
case H5T_CSET_RESERVED_15:
- HDsprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.cset));
+ HDsnprintf(buf, sizeof(buf), "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.cset));
s = buf;
break;
case H5T_CSET_ERROR:
default:
- HDsprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.atomic.u.s.cset));
+ HDsnprintf(buf, sizeof(buf), "Unknown character set: %d",
+ (int)(dt->shared->u.atomic.u.s.cset));
s = buf;
break;
} /* end switch */
@@ -1837,13 +1838,14 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_STR_RESERVED_13:
case H5T_STR_RESERVED_14:
case H5T_STR_RESERVED_15:
- HDsprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.pad));
+ HDsnprintf(buf, sizeof(buf), "H5T_STR_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.pad));
s = buf;
break;
case H5T_STR_ERROR:
default:
- HDsprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.atomic.u.s.pad));
+ HDsnprintf(buf, sizeof(buf), "Unknown string padding: %d",
+ (int)(dt->shared->u.atomic.u.s.pad));
s = buf;
break;
} /* end switch */
@@ -1862,7 +1864,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_VLEN_BADTYPE:
case H5T_VLEN_MAXTYPE:
default:
- HDsprintf(buf, "H5T_VLEN_%d", dt->shared->u.vlen.type);
+ HDsnprintf(buf, sizeof(buf), "H5T_VLEN_%d", dt->shared->u.vlen.type);
s = buf;
break;
} /* end switch */
@@ -1880,7 +1882,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_LOC_BADLOC:
case H5T_LOC_MAXLOC:
default:
- HDsprintf(buf, "H5T_LOC_%d", (int)dt->shared->u.vlen.loc);
+ HDsnprintf(buf, sizeof(buf), "H5T_LOC_%d", (int)dt->shared->u.vlen.loc);
s = buf;
break;
} /* end switch */
@@ -1911,13 +1913,13 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_CSET_RESERVED_13:
case H5T_CSET_RESERVED_14:
case H5T_CSET_RESERVED_15:
- HDsprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.vlen.cset));
+ HDsnprintf(buf, sizeof(buf), "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.vlen.cset));
s = buf;
break;
case H5T_CSET_ERROR:
default:
- HDsprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.vlen.cset));
+ HDsnprintf(buf, sizeof(buf), "Unknown character set: %d", (int)(dt->shared->u.vlen.cset));
s = buf;
break;
} /* end switch */
@@ -1949,13 +1951,13 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_STR_RESERVED_13:
case H5T_STR_RESERVED_14:
case H5T_STR_RESERVED_15:
- HDsprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.vlen.pad));
+ HDsnprintf(buf, sizeof(buf), "H5T_STR_RESERVED_%d", (int)(dt->shared->u.vlen.pad));
s = buf;
break;
case H5T_STR_ERROR:
default:
- HDsprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.vlen.pad));
+ HDsnprintf(buf, sizeof(buf), "Unknown string padding: %d", (int)(dt->shared->u.vlen.pad));
s = buf;
break;
} /* end switch */
@@ -1995,7 +1997,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_ORDER_ERROR:
default:
- HDsprintf(buf, "H5T_ORDER_%d", dt->shared->u.atomic.order);
+ HDsnprintf(buf, sizeof(buf), "H5T_ORDER_%d", dt->shared->u.atomic.order);
s = buf;
break;
} /* end switch */
@@ -2069,9 +2071,9 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_NPAD:
default:
if (dt->shared->u.atomic.u.f.pad < 0)
- HDsprintf(buf, "H5T_PAD_%d", -(dt->shared->u.atomic.u.f.pad));
+ HDsnprintf(buf, sizeof(buf), "H5T_PAD_%d", -(dt->shared->u.atomic.u.f.pad));
else
- HDsprintf(buf, "bit-%d", dt->shared->u.atomic.u.f.pad);
+ HDsnprintf(buf, sizeof(buf), "bit-%d", dt->shared->u.atomic.u.f.pad);
s = buf;
break;
} /* end switch */
@@ -2092,7 +2094,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_NORM_ERROR:
default:
- HDsprintf(buf, "H5T_NORM_%d", (int)(dt->shared->u.atomic.u.f.norm));
+ HDsnprintf(buf, sizeof(buf), "H5T_NORM_%d", (int)(dt->shared->u.atomic.u.f.norm));
s = buf;
} /* end switch */
HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth, "Normalization:", s);
@@ -2129,7 +2131,7 @@ H5O__dtype_debug(H5F_t *f, const void *mesg, FILE *stream, int indent, int fwidt
case H5T_SGN_ERROR:
case H5T_NSGN:
default:
- HDsprintf(buf, "H5T_SGN_%d", (int)(dt->shared->u.atomic.u.i.sign));
+ HDsnprintf(buf, sizeof(buf), "H5T_SGN_%d", (int)(dt->shared->u.atomic.u.i.sign));
s = buf;
break;
} /* end switch */
diff --git a/src/H5Oint.c b/src/H5Oint.c
index ee79b0c..2348790 100644
--- a/src/H5Oint.c
+++ b/src/H5Oint.c
@@ -289,7 +289,7 @@ H5O_create(H5F_t *f, size_t size_hint, size_t initial_rc, hid_t ocpl_id, H5O_loc
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "Can't apply object header to file")
done:
- if ((FAIL == ret_value) && (NULL != oh) && (H5O__free(oh) < 0))
+ if ((FAIL == ret_value) && (NULL != oh) && (H5O__free(oh, TRUE) < 0))
HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "can't delete object header")
FUNC_LEAVE_NOAPI(ret_value)
@@ -353,7 +353,7 @@ H5O_create_ohdr(H5F_t *f, hid_t ocpl_id)
ret_value = oh;
done:
- if ((NULL == ret_value) && (NULL != oh) && (H5O__free(oh) < 0))
+ if ((NULL == ret_value) && (NULL != oh) && (H5O__free(oh, TRUE) < 0))
HDONE_ERROR(H5E_OHDR, H5E_CANTFREE, NULL, "can't delete object header")
FUNC_LEAVE_NOAPI(ret_value)
@@ -3014,7 +3014,7 @@ H5O_get_proxy(const H5O_t *oh)
*-------------------------------------------------------------------------
*/
herr_t
-H5O__free(H5O_t *oh)
+H5O__free(H5O_t *oh, hbool_t force)
{
unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
@@ -3038,10 +3038,12 @@ H5O__free(H5O_t *oh)
for (u = 0; u < oh->nmesgs; u++) {
#ifndef NDEBUG
/* Verify that message is clean, unless it could have been marked
- * dirty by decoding */
+ * dirty by decoding, or if this is a forced free (in case of
+ * failure during creation of the object some messages may be dirty)
+ */
if (oh->ndecode_dirtied && oh->mesg[u].dirty)
oh->ndecode_dirtied--;
- else
+ else if (!force)
HDassert(oh->mesg[u].dirty == 0);
#endif /* NDEBUG */
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index ebfe636..1fe918d 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -551,7 +551,7 @@ H5_DLL herr_t H5O__visit(H5G_loc_t *loc, const char *obj_name, H5_index_t idx_ty
H5O_iterate2_t op, void *op_data, unsigned fields);
H5_DLL herr_t H5O__inc_rc(H5O_t *oh);
H5_DLL herr_t H5O__dec_rc(H5O_t *oh);
-H5_DLL herr_t H5O__free(H5O_t *oh);
+H5_DLL herr_t H5O__free(H5O_t *oh, hbool_t force);
/* Object header message routines */
H5_DLL herr_t H5O__msg_alloc(H5F_t *f, H5O_t *oh, const H5O_msg_class_t *type, unsigned *mesg_flags,
diff --git a/src/H5Opublic.h b/src/H5Opublic.h
index b05a2a8..70f451e 100644
--- a/src/H5Opublic.h
+++ b/src/H5Opublic.h
@@ -510,7 +510,7 @@ H5_DLL herr_t H5Oget_info3(hid_t loc_id, H5O_info2_t *oinfo, unsigned fields);
* location and relative name
*
* \fgdta_loc_obj_id{loc_id}
- * \param[in] name Name of group, relative to \p loc_id
+ * \param[in] name Name of object, relative to \p loc_id
* \param[out] oinfo Buffer in which to return object information
* \param[in] fields Flags specifying the fields to include in \p oinfo
* \lapl_id
@@ -1834,7 +1834,7 @@ H5_DLL herr_t H5Oget_info1(hid_t loc_id, H5O_info1_t *oinfo);
* by location and relative name
*
* \fgdta_loc_obj_id{loc_id}
- * \param[in] name Name of group, relative to \p loc_id
+ * \param[in] name Name of object, relative to \p loc_id
* \param[out] oinfo Buffer in which to return object information
* \lapl_id
*
@@ -1960,7 +1960,7 @@ H5_DLL herr_t H5Oget_info2(hid_t loc_id, H5O_info1_t *oinfo, unsigned fields);
* by location and relative name
*
* \fgdta_loc_obj_id{loc_id}
- * \param[in] name Name of group, relative to \p loc_id
+ * \param[in] name Name of object, relative to \p loc_id
* \param[out] oinfo Buffer in which to return object information
* \param[in] fields Flags specifying the fields to include in \p oinfo
* \lapl_id
diff --git a/src/H5PB.c b/src/H5PB.c
index 9ab87b0..ce8336b 100644
--- a/src/H5PB.c
+++ b/src/H5PB.c
@@ -1303,6 +1303,75 @@ done:
} /* end H5PB_write() */
/*-------------------------------------------------------------------------
+ * Function: H5PB_enabled
+ *
+ * Purpose: Check if the page buffer may be enabled for the specified
+ * file and data access type.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Neil Fortner
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_enabled(H5F_shared_t *f_sh, H5FD_mem_t type, hbool_t *enabled)
+{
+ H5PB_t *page_buf; /* Page buffering info for this file */
+ hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity checks */
+ HDassert(f_sh);
+
+ /* Get pointer to page buffer info for this file */
+ page_buf = f_sh->page_buf;
+
+#ifdef H5_HAVE_PARALLEL
+ if (H5F_SHARED_HAS_FEATURE(f_sh, H5FD_FEAT_HAS_MPI)) {
+#if 1
+ bypass_pb = TRUE;
+#else
+ /* MSC - why this stopped working ? */
+ int mpi_size;
+
+ if ((mpi_size = H5F_shared_mpi_get_size(f_sh)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "can't retrieve MPI communicator size")
+ if (1 != mpi_size)
+ bypass_pb = TRUE;
+#endif
+ } /* end if */
+#endif
+
+ /* If page buffering is disabled, or if this is a parallel raw data access,
+ * bypass page buffering. Note that page buffering may still be disabled for
+ * large metadata access or large non-parallel raw data access, but this
+ * function doesn't take I/O size into account so if it returns TRUE the
+ * page buffer may still be disabled for some I/O. If it returns FALSE it is
+ * always disabled for this access type.
+ */
+ if (NULL == page_buf || (bypass_pb && H5FD_MEM_DRAW == type)) {
+ /* Update statistics, since wherever this function is called, if it
+ * returns FALSE, the calling function performs I/O avoiding the page
+ * buffer layer */
+ if (page_buf) {
+ HDassert(type == H5FD_MEM_DRAW);
+ page_buf->bypasses[1]++;
+ } /* end if */
+
+ /* Page buffer is disabled, at least for this data access type */
+ *enabled = FALSE;
+ } /* end if */
+ else
+ /* Page buffer may be enabled */
+ *enabled = TRUE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB_enabled() */
+
+/*-------------------------------------------------------------------------
* Function: H5PB__insert_entry()
*
* Purpose: This function was created without documentation.
diff --git a/src/H5PBprivate.h b/src/H5PBprivate.h
index e0197bf..0a255fc 100644
--- a/src/H5PBprivate.h
+++ b/src/H5PBprivate.h
@@ -23,11 +23,6 @@
#ifndef H5PBprivate_H
#define H5PBprivate_H
-/* Include package's public header */
-#ifdef NOT_YET
-#include "H5PBpublic.h"
-#endif /* NOT_YET */
-
/* Private headers needed by this header */
#include "H5private.h" /* Generic Functions */
#include "H5Fprivate.h" /* File access */
@@ -91,6 +86,7 @@ H5_DLL herr_t H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, con
H5_DLL herr_t H5PB_remove_entry(const H5F_shared_t *f_sh, haddr_t addr);
H5_DLL herr_t H5PB_read(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/);
H5_DLL herr_t H5PB_write(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf);
+H5_DLL herr_t H5PB_enabled(H5F_shared_t *f_sh, H5FD_mem_t type, hbool_t *enabled);
/* Statistics routines */
H5_DLL herr_t H5PB_reset_stats(H5PB_t *page_buf);
diff --git a/src/H5PLpath.c b/src/H5PLpath.c
index 87ff831..b86fd6e 100644
--- a/src/H5PLpath.c
+++ b/src/H5PLpath.c
@@ -709,7 +709,7 @@ H5PL__path_table_iterate_process_path(const char *plugin_path, H5PL_iterate_type
/* Specify a file mask. *.* = We want everything! -
* skip the path if the directory can't be opened */
- HDsprintf(service, "%s\\*.dll", plugin_path);
+ HDsnprintf(service, sizeof(service), "%s\\*.dll", plugin_path);
if ((hFind = FindFirstFileA(service, &fdFile)) == INVALID_HANDLE_VALUE)
HGOTO_DONE(H5_ITER_CONT)
@@ -934,7 +934,7 @@ H5PL__find_plugin_in_path(const H5PL_search_params_t *search_params, hbool_t *fo
*found = FALSE;
/* Specify a file mask. *.* = We want everything! */
- HDsprintf(service, "%s\\*.dll", dir);
+ HDsnprintf(service, sizeof(service), "%s\\*.dll", dir);
if ((hFind = FindFirstFileA(service, &fdFile)) == INVALID_HANDLE_VALUE)
HGOTO_ERROR(H5E_PLUGIN, H5E_OPENERROR, FAIL, "can't open directory")
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index 47c17db..bfac42b 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -1185,7 +1185,7 @@ done:
*
* Purpose: Set the file driver (DRIVER_ID) for a file access
* property list (PLIST_ID) and supply an optional
- * struct containing the driver-specific properites
+ * struct containing the driver-specific properties
* (DRIVER_INFO). The driver properties will be copied into the
* property list and the reference count on the driver will be
* incremented, allowing the caller to close the driver ID but
@@ -1248,8 +1248,8 @@ H5P_set_driver_by_name(H5P_genplist_t *plist, const char *driver_name, const cha
FUNC_ENTER_NOAPI(FAIL)
- assert(plist);
- assert(driver_name);
+ HDassert(plist);
+ HDassert(driver_name);
/* Register the driver */
if ((new_driver_id = H5FD_register_driver_by_name(driver_name, app_ref)) < 0)
@@ -1336,8 +1336,8 @@ H5P_set_driver_by_value(H5P_genplist_t *plist, H5FD_class_value_t driver_value,
FUNC_ENTER_NOAPI(FAIL)
- assert(plist);
- assert(driver_value >= 0);
+ HDassert(plist);
+ HDassert(driver_value >= 0);
/* Register the driver */
if ((new_driver_id = H5FD_register_driver_by_value(driver_value, app_ref)) < 0)
@@ -5284,15 +5284,14 @@ H5P__decode_coll_md_read_flag_t(const void **_pp, void *_value)
* Function: H5Pset_all_coll_metadata_ops
*
* Purpose: Tell the library whether the metadata read operations will
- * be done collectively (1) or not (0). Default is independent.
- * With collective mode, the library will optimize access to
- * metadata operations on the file.
+ * be done collectively (1) or not (0). Default is independent.
+ * With collective mode, the library will optimize access to
+ * metadata operations on the file.
*
* Note: This routine accepts file access property lists, link
- * access property lists, attribute access property lists,
- * dataset access property lists, group access property lists,
- * named datatype access property lists,
- * and dataset transfer property lists.
+ * access property lists, attribute access property lists,
+ * dataset access property lists, group access property lists
+ * and named datatype access property lists.
*
* Return: Non-negative on success/Negative on failure
*
@@ -5312,7 +5311,7 @@ H5Pset_all_coll_metadata_ops(hid_t plist_id, hbool_t is_collective)
H5TRACE2("e", "ib", plist_id, is_collective);
/* Compare the property list's class against the other class */
- /* (Dataset, group, attribute, and named datype access property lists
+ /* (Dataset, group, attribute, and named datatype access property lists
* are sub-classes of link access property lists -QAK)
*/
if (TRUE != H5P_isa_class(plist_id, H5P_LINK_ACCESS) && TRUE != H5P_isa_class(plist_id, H5P_FILE_ACCESS))
@@ -5342,10 +5341,9 @@ done:
* Purpose: Gets information about collective metadata read mode.
*
* Note: This routine accepts file access property lists, link
- * access property lists, attribute access property lists,
- * dataset access property lists, group access property lists,
- * named datatype access property lists,
- * and dataset transfer property lists.
+ * access property lists, attribute access property lists,
+ * dataset access property lists, group access property lists,
+ * and named datatype access property lists.
*
* Return: Non-negative on success/Negative on failure
*
@@ -5363,7 +5361,7 @@ H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collective /*out*/)
H5TRACE2("e", "ix", plist_id, is_collective);
/* Compare the property list's class against the other class */
- /* (Dataset, group, attribute, and named datype access property lists
+ /* (Dataset, group, attribute, and named datatype access property lists
* are sub-classes of link access property lists -QAK)
*/
if (TRUE != H5P_isa_class(plist_id, H5P_LINK_ACCESS) && TRUE != H5P_isa_class(plist_id, H5P_FILE_ACCESS))
diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h
index 6e92e66..66a9574 100644
--- a/src/H5Pmodule.h
+++ b/src/H5Pmodule.h
@@ -111,7 +111,8 @@
*
* \defgroup GAPL General Access Properties
* \ingroup H5P
- * \todo Should this be as standalone page?
+ * The functions in this section can be applied to different kinds of property
+ * lists.
*
* \defgroup GCPL Group Creation Properties
* \ingroup H5P
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index 536407c..d0bc2b8 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -116,21 +116,70 @@ extern "C" {
/* Define property list class callback function pointer types */
//! <!-- [H5P_cls_create_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for H5Pcreate_class()
+ *
+ * \param[in] prop_id The identifier of the property list class being created
+ * \param[in] create_data User pointer to any class creation data required
+ * \return \herr_t
+ *
+ * \details This function is called when a new property list of the class
+ * with which this function was registered is being created. The
+ * function is called after any registered parent create function is
+ * called for each property value.
+ *
+ * If the create function returns a negative value, the new list is not
+ * returned to the user and the property list creation routine returns
+ * an error value.
+ *
+ * \since 1.4.0
+ *
*/
typedef herr_t (*H5P_cls_create_func_t)(hid_t prop_id, void *create_data);
//! <!-- [H5P_cls_create_func_t_snip] -->
//! <!-- [H5P_cls_copy_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for H5Pcreate_class()
+ *
+ * \param[in] new_prop_id The identifier of the property list copy
+ * \param[in] old_prop_id The identifier of the property list being copied
+ * \param[in] copy_data User pointer to any copy data required
+ * \return \herr_t
+ *
+ * \details This function is called when an existing property list of this
+ * class is copied. The copy callback function is called after any
+ * registered parent copy callback function is called for each property
+ * value.
+ *
+ * If the copy routine returns a negative value, the new list is not
+ * returned to the user and the property list copy function returns an
+ * error value.
+ *
+ * \since 1.4.0
+ *
*/
typedef herr_t (*H5P_cls_copy_func_t)(hid_t new_prop_id, hid_t old_prop_id, void *copy_data);
//! <!-- [H5P_cls_copy_func_t_snip] -->
//! <!-- [H5P_cls_close_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for H5Pcreate_class()
+ *
+ * \param[in] prop_id The identifier of the property list class being created
+ * \param[in] close_data User pointer to any close data required
+ * \return \herr_t
+ *
+ * \details This function is called when a property list of the class
+ * with which this function was registered is being closed. The
+ * function is called after any registered parent close function is
+ * called for each property value.
+ *
+ * If the close function returns a negative value, the new list is not
+ * returned to the user and the property list close routine returns
+ * an error value.
+ *
+ * \since 1.4.0
+ *
*/
typedef herr_t (*H5P_cls_close_func_t)(hid_t prop_id, void *close_data);
//! <!-- [H5P_cls_close_func_t_snip] -->
@@ -145,8 +194,8 @@ typedef herr_t (*H5P_cls_close_func_t)(hid_t prop_id, void *close_data);
* \param[in,out] value The value for the property
* \return \herr_t
*
- * \details The H5P_prp_cb1_t() describes the parameters used by the
- * property create,copy and close callback functions.
+ * \details The H5P_prp_cb1_t() function describes the parameters used by the
+ * property create, copy and close callback functions.
*/
typedef herr_t (*H5P_prp_cb1_t)(const char *name, size_t size, void *value);
//! <!-- [H5P_prp_cb1_t_snip] -->
@@ -161,8 +210,8 @@ typedef herr_t (*H5P_prp_cb1_t)(const char *name, size_t size, void *value);
* \param[in] value The value for the property
* \return \herr_t
*
- * \details The H5P_prp_cb2_t() describes the parameters used by the
- * property set ,copy and delete callback functions.
+ * \details The H5P_prp_cb2_t() function describes the parameters used by the
+ * property set, copy and delete callback functions.
*/
typedef herr_t (*H5P_prp_cb2_t)(hid_t prop_id, const char *name, size_t size, void *value);
//! <!-- [H5P_prp_cb2_t_snip] -->
@@ -172,13 +221,28 @@ typedef H5P_prp_cb2_t H5P_prp_set_func_t;
typedef H5P_prp_cb2_t H5P_prp_get_func_t;
//! <!-- [H5P_prp_encode_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for encoding property values
+ *
+ * \param[in] value The property value to be encoded
+ * \param[out] buf The encoded property value
+ * \param[out] size The size of \p buf
+ * \return \herr_t
+ *
+ * \note There is currently no public API which exposes a callback of this type.
+ *
*/
typedef herr_t (*H5P_prp_encode_func_t)(const void *value, void **buf, size_t *size);
//! <!-- [H5P_prp_encode_func_t_snip] -->
//! <!-- [H5P_prp_decode_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for decoding property values
+ *
+ * \param[in] buf A buffer containing an encoded property value
+ * \param[out] value The decoded property value
+ * \return \herr_t
+ *
+ * \note There is currently no public API which exposes a callback of this type.
+ *
*/
typedef herr_t (*H5P_prp_decode_func_t)(const void **buf, void *value);
//! <!-- [H5P_prp_decode_func_t_snip] -->
@@ -187,7 +251,16 @@ typedef H5P_prp_cb1_t H5P_prp_copy_func_t;
//! <!-- [H5P_prp_compare_func_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for comparing property values
+ *
+ * \param[in] value1 A property value
+ * \param[in] value2 A property value
+ * \param[in] size The size of the \p value1 and \p value2 buffers
+ * \return Returns a positive value if \c value1 is greater than \c value2, a
+ * negative value if \c value2 is greater than \c value1 and zero if
+ * \c value1 and \c value2 are equal.
+ *
+ * \see H5Pregister(), H5Pinsert()
*/
typedef int (*H5P_prp_compare_func_t)(const void *value1, const void *value2, size_t size);
//! <!-- [H5P_prp_compare_func_t_snip] -->
@@ -197,7 +270,19 @@ typedef H5P_prp_cb1_t H5P_prp_close_func_t;
/* Define property list iteration function type */
//! <!-- [H5P_iterate_t_snip] -->
/**
- * \todo Document me!
+ * \brief Callback function for H5Piterate()
+ *
+ * \param[in] id The identifier of a property list or property list class
+ * \param[in] name The name of the current property
+ * \param[in,out] iter_data The user context passed to H5Piterate()
+ * \return \herr_t_iter
+ *
+ * \details This function is called for each property encountered when
+ * iterating over a property list or property list class
+ * via H5Piterate().
+ *
+ * \since 1.4.0
+ *
*/
typedef herr_t (*H5P_iterate_t)(hid_t id, const char *name, void *iter_data);
//! <!-- [H5P_iterate_t_snip] -->
@@ -264,15 +349,15 @@ typedef enum H5D_mpio_no_collective_cause_t {
H5D_MPIO_DATA_TRANSFORMS = 0x04,
/**< Collective I/O was not performed because data transforms needed to be applied */
H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED = 0x08,
- /**< \todo FIXME! */
+ /**< Collective I/O was disabled by environment variable (\Code{HDF5_MPI_OPT_TYPES}) */
H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES = 0x10,
/**< Collective I/O was not performed because one of the dataspaces was neither simple nor scalar */
H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET = 0x20,
/**< Collective I/O was not performed because the dataset was neither contiguous nor chunked */
H5D_MPIO_PARALLEL_FILTERED_WRITES_DISABLED = 0x40,
- /**< \todo FIXME! */
+ /**< Collective I/O was not performed because parallel filtered writes are disabled */
H5D_MPIO_ERROR_WHILE_CHECKING_COLLECTIVE_POSSIBLE = 0x80,
- /**< \todo FIXME! */
+ /**< Error */
H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE = 0x100
/**< Sentinel */
} H5D_mpio_no_collective_cause_t;
@@ -577,77 +662,12 @@ H5_DLL hid_t H5Pcreate(hid_t cls_id);
* those existing properties, only add or remove their own class
* properties. Property list classes defined and supported in the
* HDF5 library distribution are listed and briefly described in
- * H5Pcreate(). The \p create routine is called when a new property
- * list of this class is being created. The #H5P_cls_create_func_t
- * callback function is defined as follows:
- *
- * \snippet this H5P_cls_create_func_t_snip
+ * H5Pcreate(). The \p create, \p copy, \p close functions are called
+ * when a property list of the new class is created, copied, or closed,
+ * respectively.
*
- * The parameters to this callback function are defined as follows:
- * <table>
- * <tr>
- * <td>\ref hid_t \c prop_id</td>
- * <td>IN: The identifier of the property list being created</td>
- * </tr>
- * <tr>
- * <td>\Code{void * create_data}</td>
- * <td>IN: User pointer to any class creation data required</td>
- * </tr>
- * </table>
- *
- * The \p create routine is called after any registered
- * \p create function is called for each property value. If the
- * \p create routine returns a negative value, the new list is not
- * returned to the user and the property list creation routine returns
- * an error value.
- *
- * The \p copy routine is called when an existing property
- * list of this class is copied. The #H5P_cls_copy_func_t callback
- * function is defined as follows:
- * \snippet this H5P_cls_copy_func_t_snip
- *
- * The parameters to this callback function are defined as follows:
- * <table>
- * <tr>
- * <td>\ref hid_t \c prop_id</td>
- * <td>IN: The identifier of the property list created by copying</td>
- * </tr>
- * <tr>
- * <td>\Code{void * copy_data}</td>
- * <td>IN: User pointer to any class copy data required</td>
- * </tr>
- * </table>
- *
- * The \p copy routine is called after any registered \p copy function
- * is called for each property value. If the \p copy routine returns a
- * negative value, the new list is not returned to the user and the
- * property list \p copy routine returns an error value.
- *
- * The \p close routine is called when a property list of this class
- * is being closed. The #H5P_cls_close_func_t callback function is
- * defined as follows:
- * \snippet this H5P_cls_close_func_t_snip
- *
- * The parameters to this callback function are defined as follows:
- * <table>
- * <tr>
- * <td>\ref hid_t \c prop_id</td>
- * <td>IN: The identifier of the property list being closed</td>
- * </tr>
- * <tr>
- * <td>\Code{void * close_data}</td>
- * <td>IN: User pointer to any class close data required</td>
- * </tr>
- * </table>
- *
- * The \p close routine is called before any registered \p close
- * function is called for each property value. If the \p close routine
- * returns a negative value, the property list close routine returns
- * an error value but the property list is still closed.
- *
- * H5Pclose_class() can be used to release the property list class
- * identifier returned by this function so that resources leaks will
- * not develop.
+ * H5Pclose_class() must be used to release the property list class
+ * identifier returned by this function.
*
* \since 1.4.0
*
@@ -1376,35 +1396,12 @@ H5_DLL htri_t H5Pisa_class(hid_t plist_id, hid_t pclass_id);
* returned in this case, the iterator cannot be restarted if
* one of the calls to its operator returns non-zero.
*
- * The prototype for the #H5P_iterate_t operator is as follows:
- * \snippet this H5P_iterate_t_snip
- *
- * The operation receives the property list or class
+ * The operation \p iter_func receives the property list or class
* identifier for the object being iterated over, \p id, the
* name of the current property within the object, \p name,
* and the pointer to the operator data passed in to H5Piterate(),
- * \p iter_data. The valid return values from an operator are
- * as follows:
+ * \p iter_data.
*
- * <table>
- * <tr>
- * <td>Zero</td>
- * <td>Causes the iterator to continue, returning zero when all
- * properties have been processed</td>
- * </tr>
- * <tr>
- * <td>Positive</td>
- * <td>Causes the iterator to immediately return that positive
- * value, indicating short-circuit success. The iterator
- * can be restarted at the index of the next property</td>
- * </tr>
- * <tr>
- * <td>Negative</td>
- * <td>Causes the iterator to immediately return that value,
- * indicating failure. The iterator can be restarted at the
- * index of the next property</td>
- * </tr>
- * </table>
* H5Piterate() assumes that the properties in the object
* identified by \p id remain unchanged through the iteration.
* If the membership changes during the iteration, the function's
@@ -1877,9 +1874,6 @@ H5_DLL herr_t H5Pget_attr_phase_change(hid_t plist_id, unsigned *max_compact, un
*
* \brief Returns information about a filter in a pipeline
*
- * \todo Signature for H5Pget_filter2 is different in H5Pocpl.c than in
- * H5Ppublic.h
- *
* \ocpl_id{plist_id}
* \param[in] idx Sequence number within the filter pipeline of the filter
* for which information is sought
@@ -4205,17 +4199,14 @@ H5_DLL herr_t H5Pset_alignment(hid_t fapl_id, hsize_t threshold, hsize_t alignme
*
* \note Note: Raw dataset chunk caching is not currently
* supported when using the MPI I/O and MPI POSIX file drivers
- * in read/write mode; see H5Pset_fapl_mpio() and
- * H5Pset_fapl_mpiposix(), respectively. When using one of these
- * file drivers, all calls to H5Dread() and H5Dwrite() will access
+ * in read/write mode; see H5Pset_fapl_mpio(). When using this
+ * file driver, all calls to H5Dread() and H5Dwrite() will access
* the disk directly, and H5Pset_cache() will have no effect on
* performance.
*
* \note Raw dataset chunk caching is supported when these drivers are
* used in read-only mode.
*
- * \todo Check on H5Pset_fapl_mpio() and H5Pset_fapl_mpiposix().
- *
* \version 1.8.0 The use of the \p mdc_nelmts parameter was discontinued.
* Metadata cache configuration is managed with
* H5Pset_mdc_config() and H5Pget_mdc_config().
@@ -5483,12 +5474,38 @@ H5_DLL herr_t H5Pset_coll_metadata_write(hid_t plist_id, hbool_t is_collective);
H5_DLL herr_t H5Pget_coll_metadata_write(hid_t plist_id, hbool_t *is_collective);
/**
- * \todo Add missing documentation
+ * \ingroup FAPL
+ *
+ * \brief Get the MPI communicator and info
+ *
+ * \fapl_id
+ * \param[out] comm MPI communicator
+ * \param[out] info MPI info object
+ * \return \herr_t
+ *
+ * \details H5Pget_mpi_params() gets the MPI communicator and info stored in
+ * the file access property list \p fapl_id.
+ *
+ * \todo When was this introduced?
+ *
*/
H5_DLL herr_t H5Pget_mpi_params(hid_t fapl_id, MPI_Comm *comm, MPI_Info *info);
/**
- * \todo Add missing documentation
+ * \ingroup FAPL
+ *
+ * \brief Set the MPI communicator and info
+ *
+ * \fapl_id
+ * \param[in] comm MPI communicator
+ * \param[in] info MPI info object
+ * \return \herr_t
+ *
+ * \details H5Pset_mpi_params() sets the MPI communicator and info stored in
+ * the file access property list \p fapl_id.
+ *
+ * \todo When was this introduced?
+ *
*/
H5_DLL herr_t H5Pset_mpi_params(hid_t fapl_id, MPI_Comm comm, MPI_Info info);
#endif /* H5_HAVE_PARALLEL */
@@ -7100,9 +7117,6 @@ H5_DLL herr_t H5Pget_virtual_printf_gap(hid_t dapl_id, hsize_t *gap_size);
*
* \dapl_id
* \param[out] view The flag specifying the view of the virtual dataset.
- * Valid values are:
- * \li #H5D_VDS_FIRST_MISSING
- * \li #H5D_VDS_LAST_AVAILABLE
*
* \return \herr_t
*
@@ -7456,11 +7470,7 @@ H5_DLL herr_t H5Pset_virtual_printf_gap(hid_t dapl_id, hsize_t gap_size);
*
* \dapl_id
* \param[in] view Flag specifying the extent of the data to be included
- * in the view. Valid values are:
- * \li #H5D_VDS_FIRST_MISSING: View includes all data
- * before the first missing mapped data
- * \li #H5D_VDS_LAST_AVAILABLE View includes all
- * available mapped data
+ * in the view.
*
* \return \herr_t
*
@@ -7628,8 +7638,11 @@ H5_DLL herr_t H5Pget_hyper_vector_size(hid_t fapl_id, size_t *size /*out*/);
* \details H5Pget_preserve() checks the status of the dataset transfer
* property list.
*
+ * \since 1.0.0
+ *
* \version 1.6.0 The flag parameter was changed from INTEGER to LOGICAL to
* better match the C API. (Fortran 90)
+ * \version 1.8.2 Deprecated.
*
*/
H5_DLL int H5Pget_preserve(hid_t plist_id);
@@ -7657,6 +7670,8 @@ H5_DLL int H5Pget_preserve(hid_t plist_id);
*
* Please refer to the function H5Pset_type_conv_cb() for more details.
*
+ * \since 1.8.0
+ *
*/
H5_DLL herr_t H5Pget_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t *op, void **operate_data);
/**
@@ -7680,6 +7695,8 @@ H5_DLL herr_t H5Pget_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t *op, voi
* H5Pset_vlen_mem_manager(), returning the parameters set by
* that function.
*
+ * \since 1.0.0
+ *
*/
H5_DLL herr_t H5Pget_vlen_mem_manager(hid_t plist_id, H5MM_allocate_t *alloc_func, void **alloc_info,
H5MM_free_t *free_func, void **free_info);
@@ -7923,8 +7940,9 @@ H5_DLL herr_t H5Pset_hyper_vector_size(hid_t plist_id, size_t size);
* I/O pipeline treats the destination datapoints as completely
* uninitialized.
*
- * \todo Add missing version information: introduction, deprecation, etc.
- * Why is the declaration not in the deprecated section?
+ * \since 1.0.0
+ *
+ * \version 1.8.2 Deprecated.
*
*/
H5_DLL herr_t H5Pset_preserve(hid_t plist_id, hbool_t status);
@@ -7952,7 +7970,7 @@ H5_DLL herr_t H5Pset_preserve(hid_t plist_id, hbool_t status);
* function prototype is as follows:
* \snippet H5Tpublic.h H5T_conv_except_func_t_snip
*
- * \todo Add version information.
+ * \since 1.8.0
*
*/
H5_DLL herr_t H5Pset_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t op, void *operate_data);
@@ -8002,7 +8020,8 @@ H5_DLL herr_t H5Pset_type_conv_cb(hid_t dxpl_id, H5T_conv_except_func_t op, void
* set to \c NULL and the \p alloc_info and \p free_info parameters are
* ignored.
*
- * \todo Add version information.
+ * \since 1.0.0
+ *
*/
H5_DLL herr_t H5Pset_vlen_mem_manager(hid_t plist_id, H5MM_allocate_t alloc_func, void *alloc_info,
H5MM_free_t free_func, void *free_info);
diff --git a/src/H5RS.c b/src/H5RS.c
index 117c8ea..16c2356 100644
--- a/src/H5RS.c
+++ b/src/H5RS.c
@@ -350,7 +350,7 @@ done:
*/
/* Disable warning for "format not a string literal" here -QAK */
/*
- * This pragma only needs to surround the sprintf() calls with
+ * This pragma only needs to surround the snprintf() calls with
* format_templ in the code below, but early (4.4.7, at least) gcc only
* allows diagnostic pragmas to be toggled outside of functions.
*/
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 0c765d7..7284846 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -296,12 +296,12 @@ H5S__hyper_print_spans_helper(FILE *f, const H5S_hyper_span_t *span, unsigned de
FUNC_ENTER_STATIC_NOERR
while (span) {
- HDfprintf(f, "%s: %*sdepth=%u, span=%p, (%Hu, %Hu), next=%p\n", __func__, depth * 2, "", depth, span,
- span->low, span->high, span->next);
+ HDfprintf(f, "%s: %*sdepth=%u, span=%p, (%" PRIuHSIZE ", %" PRIuHSIZE "), next=%p\n", __func__,
+ depth * 2, "", depth, (void *)span, span->low, span->high, (void *)span->next);
if (span->down) {
- HDfprintf(f, "%s: %*sspans=%p, count=%u, bounds[0]={%Hu, %Hu}, head=%p\n", __func__,
- (depth + 1) * 2, "", span->down, span->down->count, span->down->low_bounds[0],
- span->down->high_bounds[0], span->down->head);
+ HDfprintf(f, "%s: %*sspans=%p, count=%u, bounds[0]={%" PRIuHSIZE ", %" PRIuHSIZE "}, head=%p\n",
+ __func__, (depth + 1) * 2, "", (void *)span->down, span->down->count,
+ span->down->low_bounds[0], span->down->high_bounds[0], (void *)span->down->head);
H5S__hyper_print_spans_helper(f, span->down->head, depth + 1);
} /* end if */
span = span->next;
@@ -316,8 +316,9 @@ H5S__hyper_print_spans(FILE *f, const H5S_hyper_span_info_t *span_lst)
FUNC_ENTER_STATIC_NOERR
if (span_lst != NULL) {
- HDfprintf(f, "%s: spans=%p, count=%u, bounds[0]={%Hu, %Hu}, head=%p\n", __func__, span_lst,
- span_lst->count, span_lst->low_bounds[0], span_lst->high_bounds[0], span_lst->head);
+ HDfprintf(f, "%s: spans=%p, count=%u, bounds[0]={%" PRIuHSIZE ", %" PRIuHSIZE "}, head=%p\n",
+ __func__, (void *)span_lst, span_lst->count, span_lst->low_bounds[0],
+ span_lst->high_bounds[0], (void *)span_lst->head);
H5S__hyper_print_spans_helper(f, span_lst->head, 0);
} /* end if */
@@ -344,16 +345,16 @@ H5S__hyper_print_diminfo_helper(FILE *f, const char *field, unsigned ndims, cons
if (dinfo != NULL) {
HDfprintf(f, "%s: %s: start=[", __func__, field);
for (u = 0; u < ndims; u++)
- HDfprintf(f, "%Hd%s", dinfo[u].start, (u < (ndims - 1) ? ", " : "]\n"));
+ HDfprintf(f, "%" PRIuHSIZE "%s", dinfo[u].start, (u < (ndims - 1) ? ", " : "]\n"));
HDfprintf(f, "%s: %s: stride=[", __func__, field);
for (u = 0; u < ndims; u++)
- HDfprintf(f, "%Hu%s", dinfo[u].stride, (u < (ndims - 1) ? ", " : "]\n"));
+ HDfprintf(f, "%" PRIuHSIZE "%s", dinfo[u].stride, (u < (ndims - 1) ? ", " : "]\n"));
HDfprintf(f, "%s: %s: count=[", __func__, field);
for (u = 0; u < ndims; u++)
- HDfprintf(f, "%Hu%s", dinfo[u].count, (u < (ndims - 1) ? ", " : "]\n"));
+ HDfprintf(f, "%" PRIuHSIZE "%s", dinfo[u].count, (u < (ndims - 1) ? ", " : "]\n"));
HDfprintf(f, "%s: %s: block=[", __func__, field);
for (u = 0; u < ndims; u++)
- HDfprintf(f, "%Hu%s", dinfo[u].block, (u < (ndims - 1) ? ", " : "]\n"));
+ HDfprintf(f, "%" PRIuHSIZE "%s", dinfo[u].block, (u < (ndims - 1) ? ", " : "]\n"));
} /* end if */
else
HDfprintf(f, "%s: %s==NULL\n", __func__, field);
@@ -412,31 +413,31 @@ H5S__hyper_print_spans_dfs(FILE *f, const H5S_hyper_span_info_t *span_lst, unsig
for (u = 0; u < depth; u++)
HDfprintf(f, "\t");
- HDfprintf(f, "DIM[%u]: ref_count=%u, #elems=%u, head=%p, tail=%p, actual_tail=%p, matched=%t\n", depth,
- span_lst->count, num_elems, span_lst->head, span_lst->tail, actual_tail,
+ HDfprintf(f, "DIM[%u]: ref_count=%u, #elems=%u, head=%p, tail=%p, actual_tail=%p, matched=%d\n", depth,
+ span_lst->count, num_elems, (void *)span_lst->head, (void *)span_lst->tail, (void *)actual_tail,
(span_lst->tail == actual_tail));
for (u = 0; u < depth; u++)
HDfprintf(f, "\t");
HDfprintf(f, "low_bounds=[");
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", span_lst->low_bounds[u]);
- HDfprintf(f, "%llu]\n", span_lst->low_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", span_lst->low_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", span_lst->low_bounds[dims - 1]);
for (u = 0; u < depth; u++)
HDfprintf(f, "\t");
HDfprintf(f, "high_bounds=[");
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", span_lst->high_bounds[u]);
- HDfprintf(f, "%llu]\n", span_lst->high_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", span_lst->high_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", span_lst->high_bounds[dims - 1]);
cur_elem = span_lst->head;
elem_idx = 0;
while (cur_elem) {
for (u = 0; u < depth; u++)
HDfprintf(f, "\t");
- HDfprintf(f, "ELEM[%u]: ptr=%p, low=%Hu, high=%Hu, down=%p\n", elem_idx++, cur_elem, cur_elem->low,
- cur_elem->high, cur_elem->down);
+ HDfprintf(f, "ELEM[%u]: ptr=%p, low=%" PRIuHSIZE ", high=%" PRIuHSIZE ", down=%p\n", elem_idx++,
+ (void *)cur_elem, cur_elem->low, cur_elem->high, (void *)cur_elem->down);
if (cur_elem->down)
H5S__hyper_print_spans_dfs(f, cur_elem->down, depth + 1, dims);
cur_elem = cur_elem->next;
@@ -473,7 +474,7 @@ H5S__hyper_print_space_dfs(FILE *f, const H5S_t *space)
HDassert(hslab);
HDfprintf(f, "=======================\n");
- HDfprintf(f, "SPACE: span_lst=%p, #dims=%u, offset_changed=%d\n", hslab->span_lst, dims,
+ HDfprintf(f, "SPACE: span_lst=%p, #dims=%u, offset_changed=%d\n", (void *)hslab->span_lst, dims,
space->select.offset_changed);
HDfprintf(f, " offset=[");
@@ -484,25 +485,25 @@ H5S__hyper_print_space_dfs(FILE *f, const H5S_t *space)
HDfprintf(f, " low_bounds=[");
if (space->select.sel_info.hslab->diminfo_valid == H5S_DIMINFO_VALID_YES) {
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", space->select.sel_info.hslab->diminfo.low_bounds[u]);
- HDfprintf(f, "%llu]\n", space->select.sel_info.hslab->diminfo.low_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", space->select.sel_info.hslab->diminfo.low_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", space->select.sel_info.hslab->diminfo.low_bounds[dims - 1]);
} /* end if */
else {
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", space->select.sel_info.hslab->span_lst->low_bounds[u]);
- HDfprintf(f, "%llu]\n", space->select.sel_info.hslab->span_lst->low_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", space->select.sel_info.hslab->span_lst->low_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", space->select.sel_info.hslab->span_lst->low_bounds[dims - 1]);
} /* end else */
HDfprintf(f, " high_bounds=[");
if (space->select.sel_info.hslab->diminfo_valid == H5S_DIMINFO_VALID_YES) {
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", space->select.sel_info.hslab->diminfo.high_bounds[u]);
- HDfprintf(f, "%llu]\n", space->select.sel_info.hslab->diminfo.high_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", space->select.sel_info.hslab->diminfo.high_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", space->select.sel_info.hslab->diminfo.high_bounds[dims - 1]);
} /* end if */
else {
for (u = 0; u < dims - 1; u++)
- HDfprintf(f, "%llu,", space->select.sel_info.hslab->span_lst->high_bounds[u]);
- HDfprintf(f, "%llu]\n", space->select.sel_info.hslab->span_lst->high_bounds[dims - 1]);
+ HDfprintf(f, "%" PRIuHSIZE ",", space->select.sel_info.hslab->span_lst->high_bounds[u]);
+ HDfprintf(f, "%" PRIuHSIZE "]\n", space->select.sel_info.hslab->span_lst->high_bounds[dims - 1]);
} /* end else */
/* Print out diminfo, if it's valid */
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index aec5560..da9b41d 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -223,7 +223,7 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *
/* Check whether standard or BIGIO processing will be employeed */
if (bigio_count >= num_points) {
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
/* Create an MPI datatype for the whole point selection */
if (MPI_SUCCESS !=
(mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
@@ -284,7 +284,7 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *
#endif
for (i = 0; i < num_big_types; i++) {
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)bigio_count, 1,
&disp[(hsize_t)i * bigio_count],
elmt_type, &inner_types[i])))
@@ -300,7 +300,7 @@ H5S__mpio_create_point_datatype(size_t elmt_size, hsize_t num_points, MPI_Aint *
} /* end for*/
if (remaining_points) {
-#if MPI_VERSION >= 3
+#if H5_CHECK_MPI_VERSION(3, 0)
if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(
remaining_points, 1, &disp[(hsize_t)num_big_types * bigio_count],
elmt_type, &inner_types[num_big_types])))
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 240b722..bc667b1 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -1060,7 +1060,7 @@ H5S__point_get_version_enc_size(const H5S_t *space, uint32_t *version, uint8_t *
hsize_t bounds_start[H5S_MAX_RANK]; /* Starting coordinate of bounding box */
hsize_t bounds_end[H5S_MAX_RANK]; /* Opposite coordinate of bounding box */
hsize_t max_size = 0; /* Maximum selection size */
- unsigned u; /* Local index veriable */
+ unsigned u; /* Local index variable */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
diff --git a/src/H5Spublic.h b/src/H5Spublic.h
index 30ca813..536f290 100644
--- a/src/H5Spublic.h
+++ b/src/H5Spublic.h
@@ -171,7 +171,7 @@ H5_DLL herr_t H5Sclose(hid_t space_id);
* composing the entire current extent). If either \p stride or
* \p block is NULL, then it will be set to \p 1.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL hid_t H5Scombine_hyperslab(hid_t space_id, H5S_seloper_t op, const hsize_t start[],
@@ -194,7 +194,7 @@ H5_DLL hid_t H5Scombine_hyperslab(hid_t space_id, H5S_seloper_t op, const hsize_
* from \p space1_id is copied for the dataspace extent of the
* newly created dataspace.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL hid_t H5Scombine_select(hid_t space1_id, H5S_seloper_t op, hid_t space2_id);
@@ -795,7 +795,7 @@ H5_DLL htri_t H5Sis_simple(hid_t space_id);
* \p space2_id. The first selection is modified to contain the
* result of \p space1_id operated on by \p space2_id.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL herr_t H5Smodify_select(hid_t space1_id, H5S_seloper_t op, hid_t space2_id);
@@ -940,7 +940,7 @@ H5_DLL herr_t H5Ssel_iter_reset(hid_t sel_iter_id, hid_t space_id);
*
* \note This can be useful for VOL developers to implement chunked datasets.
*
- * \since 1.12.0
+ * \since 1.10.6
*/
H5_DLL herr_t H5Sselect_adjust(hid_t spaceid, const hssize_t *offset);
/**
@@ -977,7 +977,7 @@ H5_DLL herr_t H5Sselect_all(hid_t spaceid);
* offset) from the source dataspace \p src_id to the destination
* dataspace \p dst_id.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL herr_t H5Sselect_copy(hid_t dst_id, hid_t src_id);
@@ -1205,7 +1205,7 @@ H5_DLL herr_t H5Sselect_hyperslab(hid_t space_id, H5S_seloper_t op, const hsize_
* \note Assumes that \p start & \p end block bounds are inclusive, so
* \p start == \p end value is OK.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL htri_t H5Sselect_intersect_block(hid_t space_id, const hsize_t *start, const hsize_t *end);
@@ -1244,7 +1244,7 @@ H5_DLL herr_t H5Sselect_none(hid_t spaceid);
* into a third selection.This can be useful for VOL developers to
* implement chunked or virtual datasets.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL hid_t H5Sselect_project_intersection(hid_t src_space_id, hid_t dst_space_id,
@@ -1265,7 +1265,7 @@ H5_DLL hid_t H5Sselect_project_intersection(hid_t src_space_id, hid_t dst_space_
* This is primarily used for reading the entire selection in
* one swoop.
*
- * \since 1.12.0
+ * \since 1.10.6
*
*/
H5_DLL htri_t H5Sselect_shape_same(hid_t space1_id, hid_t space2_id);
diff --git a/src/H5TS.c b/src/H5TS.c
index 20aa283..37fd66a 100644
--- a/src/H5TS.c
+++ b/src/H5TS.c
@@ -192,10 +192,10 @@ H5TS_tid_destructor(void *_v)
return;
/* TBD use an atomic CAS */
- HDpthread_mutex_lock(&H5TS_tid_mtx);
+ pthread_mutex_lock(&H5TS_tid_mtx);
tid->next = H5TS_tid_next_free;
H5TS_tid_next_free = tid;
- HDpthread_mutex_unlock(&H5TS_tid_mtx);
+ pthread_mutex_unlock(&H5TS_tid_mtx);
}
/*--------------------------------------------------------------------------
@@ -215,8 +215,8 @@ H5TS_tid_destructor(void *_v)
static void
H5TS_tid_init(void)
{
- HDpthread_mutex_init(&H5TS_tid_mtx, NULL);
- HDpthread_key_create(&H5TS_tid_key, H5TS_tid_destructor);
+ pthread_mutex_init(&H5TS_tid_mtx, NULL);
+ pthread_key_create(&H5TS_tid_key, H5TS_tid_destructor);
}
/*--------------------------------------------------------------------------
@@ -246,7 +246,7 @@ H5TS_tid_init(void)
uint64_t
H5TS_thread_id(void)
{
- H5TS_tid_t *tid = HDpthread_getspecific(H5TS_tid_key);
+ H5TS_tid_t *tid = pthread_getspecific(H5TS_tid_key);
H5TS_tid_t proto_tid;
/* An ID is already assigned. */
@@ -260,14 +260,14 @@ H5TS_thread_id(void)
* point `tid` at `proto_tid` if we need to allocate some
* memory.
*/
- HDpthread_mutex_lock(&H5TS_tid_mtx);
+ pthread_mutex_lock(&H5TS_tid_mtx);
if ((tid = H5TS_tid_next_free) != NULL)
H5TS_tid_next_free = tid->next;
else if (H5TS_tid_next_id != UINT64_MAX) {
tid = &proto_tid;
tid->id = ++H5TS_tid_next_id;
}
- HDpthread_mutex_unlock(&H5TS_tid_mtx);
+ pthread_mutex_unlock(&H5TS_tid_mtx);
/* If a prototype ID record was established, copy it to the heap. */
if (tid == &proto_tid)
@@ -281,7 +281,7 @@ H5TS_thread_id(void)
* to it.
*/
tid->next = NULL;
- if (HDpthread_setspecific(H5TS_tid_key, tid) != 0) {
+ if (pthread_setspecific(H5TS_tid_key, tid) != 0) {
H5TS_tid_destructor(tid);
return 0;
}
@@ -323,29 +323,29 @@ H5TS_pthread_first_thread_init(void)
#endif
/* initialize global API mutex lock */
- HDpthread_mutex_init(&H5_g.init_lock.atomic_lock, NULL);
- HDpthread_cond_init(&H5_g.init_lock.cond_var, NULL);
+ pthread_mutex_init(&H5_g.init_lock.atomic_lock, NULL);
+ pthread_cond_init(&H5_g.init_lock.cond_var, NULL);
H5_g.init_lock.lock_count = 0;
- HDpthread_mutex_init(&H5_g.init_lock.atomic_lock2, NULL);
+ pthread_mutex_init(&H5_g.init_lock.atomic_lock2, NULL);
H5_g.init_lock.attempt_lock_count = 0;
/* Initialize integer thread identifiers. */
H5TS_tid_init();
/* initialize key for thread-specific error stacks */
- HDpthread_key_create(&H5TS_errstk_key_g, H5TS__key_destructor);
+ pthread_key_create(&H5TS_errstk_key_g, H5TS__key_destructor);
#ifdef H5_HAVE_CODESTACK
/* initialize key for thread-specific function stacks */
- HDpthread_key_create(&H5TS_funcstk_key_g, H5TS__key_destructor);
+ pthread_key_create(&H5TS_funcstk_key_g, H5TS__key_destructor);
#endif /* H5_HAVE_CODESTACK */
/* initialize key for thread-specific API contexts */
- HDpthread_key_create(&H5TS_apictx_key_g, H5TS__key_destructor);
+ pthread_key_create(&H5TS_apictx_key_g, H5TS__key_destructor);
/* initialize key for thread cancellability mechanism */
- HDpthread_key_create(&H5TS_cancel_key_s, H5TS__key_destructor);
+ pthread_key_create(&H5TS_cancel_key_s, H5TS__key_destructor);
FUNC_LEAVE_NOAPI_VOID_NAMECHECK_ONLY
} /* end H5TS_pthread_first_thread_init() */
@@ -380,13 +380,13 @@ H5TS__mutex_acquire(H5TS_mutex_t *mutex, unsigned int lock_count, hbool_t *acqui
*acquired = TRUE;
#else /* H5_HAVE_WIN_THREADS */
/* Attempt to acquire the mutex lock */
- if (0 == HDpthread_mutex_lock(&mutex->atomic_lock)) {
- pthread_t my_thread_id = HDpthread_self();
+ if (0 == pthread_mutex_lock(&mutex->atomic_lock)) {
+ pthread_t my_thread_id = pthread_self();
/* Check if locked already */
if (mutex->lock_count) {
/* Check for this thread already owning the lock */
- if (HDpthread_equal(my_thread_id, mutex->owner_thread)) {
+ if (pthread_equal(my_thread_id, mutex->owner_thread)) {
/* Already owned by self - increment count */
mutex->lock_count += lock_count;
*acquired = TRUE;
@@ -401,7 +401,7 @@ H5TS__mutex_acquire(H5TS_mutex_t *mutex, unsigned int lock_count, hbool_t *acqui
*acquired = TRUE;
} /* end else */
- if (0 != HDpthread_mutex_unlock(&mutex->atomic_lock))
+ if (0 != pthread_mutex_unlock(&mutex->atomic_lock))
ret_value = -1;
} /* end if */
else
@@ -463,35 +463,35 @@ herr_t H5TS_mutex_lock(H5TS_mutex_t *mutex)
EnterCriticalSection(&mutex->CriticalSection);
#else /* H5_HAVE_WIN_THREADS */
/* Acquire the "attempt" lock, increment the attempt lock count, release the lock */
- ret_value = HDpthread_mutex_lock(&mutex->atomic_lock2);
+ ret_value = pthread_mutex_lock(&mutex->atomic_lock2);
if (ret_value)
HGOTO_DONE(ret_value);
mutex->attempt_lock_count++;
- ret_value = HDpthread_mutex_unlock(&mutex->atomic_lock2);
+ ret_value = pthread_mutex_unlock(&mutex->atomic_lock2);
if (ret_value)
HGOTO_DONE(ret_value);
/* Acquire the library lock */
- ret_value = HDpthread_mutex_lock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_lock(&mutex->atomic_lock);
if (ret_value)
HGOTO_DONE(ret_value);
/* Check if this thread already owns the lock */
- if (mutex->lock_count && HDpthread_equal(HDpthread_self(), mutex->owner_thread))
+ if (mutex->lock_count && pthread_equal(pthread_self(), mutex->owner_thread))
/* already owned by self - increment count */
mutex->lock_count++;
else {
/* Wait until the lock is released by current owner thread */
while (mutex->lock_count)
- HDpthread_cond_wait(&mutex->cond_var, &mutex->atomic_lock);
+ pthread_cond_wait(&mutex->cond_var, &mutex->atomic_lock);
/* After we've received the signal, take ownership of the mutex */
- mutex->owner_thread = HDpthread_self();
+ mutex->owner_thread = pthread_self();
mutex->lock_count = 1;
} /* end else */
/* Release the library lock */
- ret_value = HDpthread_mutex_unlock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_unlock(&mutex->atomic_lock);
done:
#endif /* H5_HAVE_WIN_THREADS */
@@ -530,12 +530,12 @@ H5TS__mutex_unlock(H5TS_mutex_t *mutex, unsigned int *lock_count)
#else /* H5_HAVE_WIN_THREADS */
/* Reset the lock count for this thread */
- ret_value = HDpthread_mutex_lock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_lock(&mutex->atomic_lock);
if (ret_value)
HGOTO_DONE(ret_value);
*lock_count = mutex->lock_count;
mutex->lock_count = 0;
- ret_value = HDpthread_mutex_unlock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_unlock(&mutex->atomic_lock);
/* If the lock count drops to zero, signal the condition variable, to
* wake another thread.
@@ -543,7 +543,7 @@ H5TS__mutex_unlock(H5TS_mutex_t *mutex, unsigned int *lock_count)
if (mutex->lock_count == 0) {
int err;
- err = HDpthread_cond_signal(&mutex->cond_var);
+ err = pthread_cond_signal(&mutex->cond_var);
if (err != 0)
ret_value = err;
} /* end if */
@@ -586,11 +586,11 @@ H5TS_mutex_unlock(H5TS_mutex_t *mutex)
#else /* H5_HAVE_WIN_THREADS */
/* Decrement the lock count for this thread */
- ret_value = HDpthread_mutex_lock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_lock(&mutex->atomic_lock);
if (ret_value)
HGOTO_DONE(ret_value);
mutex->lock_count--;
- ret_value = HDpthread_mutex_unlock(&mutex->atomic_lock);
+ ret_value = pthread_mutex_unlock(&mutex->atomic_lock);
/* If the lock count drops to zero, signal the condition variable, to
* wake another thread.
@@ -598,7 +598,7 @@ H5TS_mutex_unlock(H5TS_mutex_t *mutex)
if (mutex->lock_count == 0) {
int err;
- err = HDpthread_cond_signal(&mutex->cond_var);
+ err = pthread_cond_signal(&mutex->cond_var);
if (err != 0)
ret_value = err;
} /* end if */
@@ -630,13 +630,13 @@ H5TSmutex_get_attempt_count(unsigned int *count)
#ifdef H5_HAVE_WIN_THREADS
/* Add Win32 equivalent here when async is supported */
#else /* H5_HAVE_WIN_THREADS */
- ret_value = HDpthread_mutex_lock(&H5_g.init_lock.atomic_lock2);
+ ret_value = pthread_mutex_lock(&H5_g.init_lock.atomic_lock2);
if (ret_value)
HGOTO_DONE(ret_value);
*count = H5_g.init_lock.attempt_lock_count;
- ret_value = HDpthread_mutex_unlock(&H5_g.init_lock.atomic_lock2);
+ ret_value = pthread_mutex_unlock(&H5_g.init_lock.atomic_lock2);
if (ret_value)
HGOTO_DONE(ret_value);
@@ -725,7 +725,7 @@ H5TS_cancel_count_inc(void)
HGOTO_DONE(FAIL);
/* Set the thread's cancellation counter with the new object */
- ret_value = HDpthread_setspecific(H5TS_cancel_key_s, (void *)cancel_counter);
+ ret_value = pthread_setspecific(H5TS_cancel_key_s, (void *)cancel_counter);
if (ret_value) {
HDfree(cancel_counter);
HGOTO_DONE(FAIL);
@@ -735,7 +735,7 @@ H5TS_cancel_count_inc(void)
/* Check if thread entering library */
if (cancel_counter->cancel_count == 0)
/* Set cancellation state to 'disable', and remember previous state */
- ret_value = HDpthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_counter->previous_state);
+ ret_value = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_counter->previous_state);
/* Increment # of times the library API was re-entered, to avoid resetting
* previous cancellation state until the final API routine is returning.
@@ -788,7 +788,7 @@ H5TS_cancel_count_dec(void)
/* Check for leaving last API routine */
if (cancel_counter->cancel_count == 1)
/* Reset to previous thread cancellation state, if last API */
- ret_value = HDpthread_setcancelstate(cancel_counter->previous_state, NULL);
+ ret_value = pthread_setcancelstate(cancel_counter->previous_state, NULL);
/* Decrement cancellation counter */
--cancel_counter->cancel_count;
@@ -995,7 +995,7 @@ H5TS_create_thread(H5TS_thread_cb_t func, H5TS_attr_t *attr, void *udata)
#else /* H5_HAVE_WIN_THREADS */
- HDpthread_create(&ret_value, attr, (void *(*)(void *))func, udata);
+ pthread_create(&ret_value, attr, (void *(*)(void *))func, udata);
#endif /* H5_HAVE_WIN_THREADS */
diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h
index 3150f59..5d69854 100644
--- a/src/H5TSprivate.h
+++ b/src/H5TSprivate.h
@@ -13,11 +13,9 @@
/*-------------------------------------------------------------------------
*
- * Created: H5TSprivate.h
- * May 2 2000
- * Chee Wai LEE
+ * Created: H5TSprivate.h
*
- * Purpose: Private non-prototype header.
+ * Purpose: Thread-safety abstractions used by the library
*
*-------------------------------------------------------------------------
*/
diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c
index e99494a..a3a1aa0 100644
--- a/src/H5Tcommit.c
+++ b/src/H5Tcommit.c
@@ -1127,7 +1127,7 @@ H5T_open(const H5G_loc_t *loc)
done:
if (ret_value == NULL) {
if (dt) {
- if (shared_fo == NULL) { /* Need to free shared of */
+ if (shared_fo == NULL) { /* Need to free shared file object */
if (dt->shared->owned_vol_obj && H5VL_free_object(dt->shared->owned_vol_obj) < 0)
HDONE_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, NULL, "unable to close owned VOL object")
dt->shared = H5FL_FREE(H5T_shared_t, dt->shared);
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index 94697af..5efff10 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -2753,7 +2753,7 @@ H5T__conv_enum_init(H5T_t *src, H5T_t *dst, H5T_cdata_t *cdata)
HDassert(domain[1] >= domain[0]);
length = (unsigned)(domain[1] - domain[0]) + 1;
if (src->shared->u.enumer.nmembs < 2 ||
- (double)length / src->shared->u.enumer.nmembs < (double)(1.2f)) {
+ (double)length / src->shared->u.enumer.nmembs < (double)(1.2F)) {
priv->base = domain[0];
priv->length = length;
if (NULL == (map = (int *)H5MM_malloc(length * sizeof(int))))
diff --git a/src/H5Tnative.c b/src/H5Tnative.c
index 4529e57..e6fab51 100644
--- a/src/H5Tnative.c
+++ b/src/H5Tnative.c
@@ -43,7 +43,7 @@ static herr_t H5T__cmp_offset(size_t *comp_size, size_t *offset, size_t elem_siz
*
* Purpose: High-level API to return the native type of a datatype.
* The native type is chosen by matching the size and class of
- * querried datatype from the following native premitive
+ * queried datatype from the following native primitive
* datatypes:
* H5T_NATIVE_CHAR H5T_NATIVE_UCHAR
* H5T_NATIVE_SHORT H5T_NATIVE_USHORT
@@ -56,7 +56,7 @@ static herr_t H5T__cmp_offset(size_t *comp_size, size_t *offset, size_t elem_siz
* H5T_NATIVE_LDOUBLE
*
* Compound, array, enum, and VL types all choose among these
- * types for their members. Time, Bifield, Opaque, Reference
+ * types for their members. Time, Bitfield, Opaque, Reference
* types are only copy out.
*
* Return: Success: Returns the native data type if successful.
@@ -696,7 +696,7 @@ H5_GCC_DIAG_OFF("duplicated-branches")
/*-------------------------------------------------------------------------
* Function: H5T__get_native_float
*
- * Purpose: Returns the native floatt type of a datatype.
+ * Purpose: Returns the native float type of a datatype.
*
* Return: Success: Returns the native data type if successful.
*
diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index 6624096..9731379 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -101,6 +101,8 @@ typedef struct H5T_subset_info_t {
} H5T_subset_info_t;
/* Forward declarations for prototype arguments */
+struct H5G_loc_t;
+struct H5G_name_t;
struct H5O_shared_t;
/* The native endianness of the platform */
@@ -120,14 +122,14 @@ H5_DLL size_t H5T_get_size(const H5T_t *dt);
H5_DLL hbool_t H5T_get_force_conv(const H5T_t *dt);
H5_DLL int H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset);
H5_DLL herr_t H5T_encode(H5T_t *obj, unsigned char *buf, size_t *nalloc);
-H5_DLL H5T_t * H5T_decode(size_t buf_size, const unsigned char *buf);
-H5_DLL herr_t H5T_debug(const H5T_t *dt, FILE *stream);
-H5_DLL struct H5O_loc_t *H5T_oloc(H5T_t *dt);
-H5_DLL H5G_name_t *H5T_nameof(const H5T_t *dt);
-H5_DLL htri_t H5T_is_immutable(const H5T_t *dt);
-H5_DLL htri_t H5T_is_named(const H5T_t *dt);
-H5_DLL herr_t H5T_convert_committed_datatype(H5T_t *dt, H5F_t *f);
-H5_DLL htri_t H5T_is_relocatable(const H5T_t *dt);
+H5_DLL H5T_t * H5T_decode(size_t buf_size, const unsigned char *buf);
+H5_DLL herr_t H5T_debug(const H5T_t *dt, FILE *stream);
+H5_DLL struct H5O_loc_t * H5T_oloc(H5T_t *dt);
+H5_DLL struct H5G_name_t *H5T_nameof(const H5T_t *dt);
+H5_DLL htri_t H5T_is_immutable(const H5T_t *dt);
+H5_DLL htri_t H5T_is_named(const H5T_t *dt);
+H5_DLL herr_t H5T_convert_committed_datatype(H5T_t *dt, H5F_t *f);
+H5_DLL htri_t H5T_is_relocatable(const H5T_t *dt);
H5_DLL H5T_path_t *H5T_path_find(const H5T_t *src, const H5T_t *dst);
H5_DLL hbool_t H5T_path_noop(const H5T_path_t *p);
H5_DLL H5T_bkg_t H5T_path_bkg(const H5T_path_t *p);
@@ -159,7 +161,7 @@ H5_DLL herr_t H5T_invoke_vol_optional(H5T_t *dt, H5VL_optional_args_t *args, hi
H5_DLL H5R_type_t H5T_get_ref_type(const H5T_t *dt);
/* Operations on named datatypes */
-H5_DLL H5T_t *H5T_open(const H5G_loc_t *loc);
+H5_DLL H5T_t *H5T_open(const struct H5G_loc_t *loc);
H5_DLL int H5T_link(const H5T_t *type, int adjust);
H5_DLL herr_t H5T_update_shared(H5T_t *type);
diff --git a/src/H5VLcallback.c b/src/H5VLcallback.c
index 4cf4d53..0c5c73d 100644
--- a/src/H5VLcallback.c
+++ b/src/H5VLcallback.c
@@ -30,7 +30,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5ESprivate.h" /* Event Sets */
-#include "H5Fprivate.h" /* File access */
+#include "H5Fprivate.h" /* File access */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
diff --git a/src/H5VLnative.h b/src/H5VLnative.h
index fe8ede2..5e43c4e 100644
--- a/src/H5VLnative.h
+++ b/src/H5VLnative.h
@@ -74,7 +74,7 @@ typedef union H5VL_native_attr_optional_args_t {
#define H5VL_NATIVE_DATASET_CHUNK_WRITE 7 /* H5Dchunk_write */
#define H5VL_NATIVE_DATASET_GET_VLEN_BUF_SIZE 8 /* H5Dvlen_get_buf_size */
#define H5VL_NATIVE_DATASET_GET_OFFSET 9 /* H5Dget_offset */
-#define H5VL_NATIVE_DATASET_CHUNK_ITER 10 /* H5Dget_offset */
+#define H5VL_NATIVE_DATASET_CHUNK_ITER 10 /* H5Dchunk_iter */
/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
* must be updated.
*/
@@ -208,8 +208,8 @@ typedef union H5VL_native_dataset_optional_args_t {
#ifdef H5_HAVE_PARALLEL
#define H5VL_NATIVE_FILE_GET_MPI_ATOMICITY 26 /* H5Fget_mpi_atomicity */
#define H5VL_NATIVE_FILE_SET_MPI_ATOMICITY 27 /* H5Fset_mpi_atomicity */
-#endif /* H5_HAVE_PARALLEL */
-#define H5VL_NATIVE_FILE_POST_OPEN 28 /* Adjust file after open, with wrapping context */
+#endif
+#define H5VL_NATIVE_FILE_POST_OPEN 28 /* Adjust file after open, with wrapping context */
/* NOTE: If values over 1023 are added, the H5VL_RESERVED_NATIVE_OPTIONAL macro
* must be updated.
*/
diff --git a/src/H5VLnative_token.c b/src/H5VLnative_token.c
index bed0164..b5bd7b8 100644
--- a/src/H5VLnative_token.c
+++ b/src/H5VLnative_token.c
@@ -112,7 +112,7 @@ H5VL__native_token_to_str(void *obj, H5I_type_t obj_type, const H5O_token_t *tok
if (NULL == (*token_str = H5MM_malloc(addr_ndigits + 1)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate buffer for token string")
- HDsnprintf(*token_str, addr_ndigits + 1, H5_PRINTF_HADDR_FMT, addr);
+ HDsnprintf(*token_str, addr_ndigits + 1, "%" PRIuHADDR, addr);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -139,7 +139,7 @@ H5VL__native_str_to_token(void *obj, H5I_type_t obj_type, const char *token_str,
/* Check parameters */
HDassert(token_str);
- HDsscanf(token_str, H5_PRINTF_HADDR_FMT, &addr);
+ HDsscanf(token_str, "%" PRIuHADDR, &addr);
if (H5VL_native_addr_to_token(obj, obj_type, addr, token) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't convert address to object token")
diff --git a/src/H5VLpassthru.c b/src/H5VLpassthru.c
index 6b4c59f..6eda875 100644
--- a/src/H5VLpassthru.c
+++ b/src/H5VLpassthru.c
@@ -641,16 +641,13 @@ H5VL_pass_through_info_to_str(const void *_info, char **str)
under_vol_str_len = strlen(under_vol_string);
/* Allocate space for our info */
- *str = (char *)H5allocate_memory(32 + under_vol_str_len, (hbool_t)0);
+ size_t strSize = 32 + under_vol_str_len;
+ *str = (char *)H5allocate_memory(strSize, (hbool_t)0);
assert(*str);
- /* Encode our info
- * Normally we'd use snprintf() here for a little extra safety, but that
- * call had problems on Windows until recently. So, to be as platform-independent
- * as we can, we're using sprintf() instead.
- */
- sprintf(*str, "under_vol=%u;under_info={%s}", (unsigned)under_value,
- (under_vol_string ? under_vol_string : ""));
+ /* Encode our info */
+ snprintf(*str, strSize, "under_vol=%u;under_info={%s}", (unsigned)under_value,
+ (under_vol_string ? under_vol_string : ""));
return 0;
} /* end H5VL_pass_through_info_to_str() */
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index 0d3bd0f..e773bae 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -393,8 +393,8 @@ static const unsigned char LogTable256[] = {
static inline unsigned H5_ATTR_UNUSED
H5VM_log2_gen(uint64_t n)
{
- unsigned r; /* r will be log2(n) */
- register unsigned int t, tt, ttt; /* temporaries */
+ unsigned r; /* r will be log2(n) */
+ unsigned int t, tt, ttt; /* temporaries */
if ((ttt = (unsigned)(n >> 32)))
if ((tt = (unsigned)(n >> 48)))
diff --git a/src/H5Z.c b/src/H5Z.c
index bcdd837..763eac2 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -594,14 +594,9 @@ H5Z__flush_file_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void H5_ATTR_PARA
/* Do a global flush if the file is opened for write */
if (H5F_ACC_RDWR & H5F_INTENT(f)) {
-/* When parallel HDF5 is defined, check for collective metadata reads on this
- * file and set the flag for metadata I/O in the API context. -QAK, 2018/02/14
- */
#ifdef H5_HAVE_PARALLEL
/* Check if MPIO driver is used */
if (H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
- H5P_coll_md_read_flag_t coll_md_read; /* Do all metadata reads collectively */
-
/* Sanity check for collectively calling H5Zunregister, if requested */
/* (Sanity check assumes that a barrier on one file's comm
* is sufficient (i.e. that there aren't different comms for
@@ -621,13 +616,8 @@ H5Z__flush_file_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void H5_ATTR_PARA
/* Set the "sanity checked" flag */
object->sanity_checked = TRUE;
} /* end if */
-
- /* Check whether to use the collective metadata read DXPL */
- coll_md_read = H5F_COLL_MD_READ(f);
- if (H5P_USER_TRUE == coll_md_read)
- H5CX_set_coll_metadata_read(TRUE);
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
/* Call the flush routine for mounted file hierarchies */
if (H5F_flush_mounts((H5F_t *)obj_ptr) < 0)
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index 21363bc..905d417 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -975,7 +975,7 @@ H5Z__filter_nbit(unsigned flags, size_t cd_nelmts, const unsigned cd_values[], s
/* input; decompress */
if (flags & H5Z_FLAG_REVERSE) {
- size_out = d_nelmts * cd_values[4]; /* cd_values[4] stores datatype size */
+ size_out = d_nelmts * (size_t)cd_values[4]; /* cd_values[4] stores datatype size */
/* allocate memory space for decompressed buffer */
if (NULL == (outbuf = (unsigned char *)H5MM_malloc(size_out)))
@@ -1170,7 +1170,8 @@ H5Z__nbit_decompress_one_array(unsigned char *data, size_t data_offset, unsigned
n = total_size / p.size;
for (i = 0; i < n; i++)
- H5Z__nbit_decompress_one_atomic(data, data_offset + i * p.size, buffer, j, buf_len, &p);
+ H5Z__nbit_decompress_one_atomic(data, data_offset + i * (size_t)p.size, buffer, j, buf_len,
+ &p);
break;
case H5Z_NBIT_ARRAY:
@@ -1178,8 +1179,8 @@ H5Z__nbit_decompress_one_array(unsigned char *data, size_t data_offset, unsigned
n = total_size / base_size; /* number of base_type elements inside the array datatype */
begin_index = *parms_index;
for (i = 0; i < n; i++) {
- if (H5Z__nbit_decompress_one_array(data, data_offset + i * base_size, buffer, j, buf_len,
- parms, parms_index) < 0)
+ if (H5Z__nbit_decompress_one_array(data, data_offset + i * (size_t)base_size, buffer, j,
+ buf_len, parms, parms_index) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "can't decompress array")
*parms_index = begin_index;
}
@@ -1190,8 +1191,8 @@ H5Z__nbit_decompress_one_array(unsigned char *data, size_t data_offset, unsigned
n = total_size / base_size; /* number of base_type elements inside the array datatype */
begin_index = *parms_index;
for (i = 0; i < n; i++) {
- if (H5Z__nbit_decompress_one_compound(data, data_offset + i * base_size, buffer, j, buf_len,
- parms, parms_index) < 0)
+ if (H5Z__nbit_decompress_one_compound(data, data_offset + i * (size_t)base_size, buffer, j,
+ buf_len, parms, parms_index) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "can't decompress compound")
*parms_index = begin_index;
}
@@ -1291,7 +1292,7 @@ H5Z__nbit_decompress(unsigned char *data, unsigned d_nelmts, unsigned char *buff
FUNC_ENTER_STATIC
/* may not have to initialize to zeros */
- HDmemset(data, 0, d_nelmts * parms[4]);
+ HDmemset(data, 0, d_nelmts * (size_t)parms[4]);
/* initialization before the loop */
j = 0;
@@ -1309,7 +1310,7 @@ H5Z__nbit_decompress(unsigned char *data, unsigned d_nelmts, unsigned char *buff
HGOTO_ERROR(H5E_PLINE, H5E_BADTYPE, FAIL, "invalid datatype precision/offset")
for (i = 0; i < d_nelmts; i++)
- H5Z__nbit_decompress_one_atomic(data, i * p.size, buffer, &j, &buf_len, &p);
+ H5Z__nbit_decompress_one_atomic(data, i * (size_t)p.size, buffer, &j, &buf_len, &p);
break;
case H5Z_NBIT_ARRAY:
@@ -1468,7 +1469,7 @@ H5Z__nbit_compress_one_array(unsigned char *data, size_t data_offset, unsigned c
p.offset = parms[(*parms_index)++];
n = total_size / p.size;
for (i = 0; i < n; i++)
- H5Z__nbit_compress_one_atomic(data, data_offset + i * p.size, buffer, j, buf_len, &p);
+ H5Z__nbit_compress_one_atomic(data, data_offset + i * (size_t)p.size, buffer, j, buf_len, &p);
break;
case H5Z_NBIT_ARRAY:
@@ -1476,8 +1477,8 @@ H5Z__nbit_compress_one_array(unsigned char *data, size_t data_offset, unsigned c
n = total_size / base_size; /* number of base_type elements inside the array datatype */
begin_index = *parms_index;
for (i = 0; i < n; i++) {
- H5Z__nbit_compress_one_array(data, data_offset + i * base_size, buffer, j, buf_len, parms,
- parms_index);
+ H5Z__nbit_compress_one_array(data, data_offset + i * (size_t)base_size, buffer, j, buf_len,
+ parms, parms_index);
*parms_index = begin_index;
}
break;
@@ -1487,8 +1488,8 @@ H5Z__nbit_compress_one_array(unsigned char *data, size_t data_offset, unsigned c
n = total_size / base_size; /* number of base_type elements inside the array datatype */
begin_index = *parms_index;
for (i = 0; i < n; i++) {
- H5Z__nbit_compress_one_compound(data, data_offset + i * base_size, buffer, j, buf_len, parms,
- parms_index);
+ H5Z__nbit_compress_one_compound(data, data_offset + i * (size_t)base_size, buffer, j, buf_len,
+ parms, parms_index);
*parms_index = begin_index;
}
break;
@@ -1574,7 +1575,7 @@ H5Z__nbit_compress(unsigned char *data, unsigned d_nelmts, unsigned char *buffer
p.offset = parms[7];
for (i = 0; i < d_nelmts; i++)
- H5Z__nbit_compress_one_atomic(data, i * p.size, buffer, &new_size, &buf_len, &p);
+ H5Z__nbit_compress_one_atomic(data, i * (size_t)p.size, buffer, &new_size, &buf_len, &p);
break;
case H5Z_NBIT_ARRAY:
diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c
index 9942cee..46c1a10 100644
--- a/src/H5Zscaleoffset.c
+++ b/src/H5Zscaleoffset.c
@@ -1205,7 +1205,7 @@ H5Z__filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_valu
/* prepare parameters to pass to compress/decompress functions */
p.size = cd_values[H5Z_SCALEOFFSET_PARM_SIZE];
- p.mem_order = H5T_native_order_g;
+ p.mem_order = (unsigned)H5T_native_order_g;
/* input; decompress */
if (flags & H5Z_FLAG_REVERSE) {
@@ -1240,7 +1240,7 @@ H5Z__filter_scaleoffset(unsigned flags, size_t cd_nelmts, const unsigned cd_valu
p.minbits = minbits;
/* calculate size of output buffer after decompression */
- size_out = d_nelmts * p.size;
+ size_out = d_nelmts * (size_t)p.size;
/* allocate memory space for decompressed buffer */
if (NULL == (outbuf = (unsigned char *)H5MM_malloc(size_out)))
@@ -1403,7 +1403,7 @@ H5Z__scaleoffset_convert(void *buf, unsigned d_nelmts, unsigned dtype_size)
unsigned char *buffer, temp;
buffer = (unsigned char *)buf;
- for (i = 0; i < d_nelmts * dtype_size; i += dtype_size)
+ for (i = 0; i < d_nelmts * (size_t)dtype_size; i += dtype_size)
for (j = 0; j < dtype_size / 2; j++) {
/* swap pair of bytes */
temp = buffer[i + j];
@@ -1681,7 +1681,7 @@ H5Z__scaleoffset_decompress(unsigned char *data, unsigned d_nelmts, unsigned cha
unsigned buf_len;
/* must initialize to zeros */
- for (i = 0; i < d_nelmts * p.size; i++)
+ for (i = 0; i < d_nelmts * (size_t)p.size; i++)
data[i] = 0;
/* initialization before the loop */
diff --git a/src/H5module.h b/src/H5module.h
index 6d3cba8..f7d3cd6 100644
--- a/src/H5module.h
+++ b/src/H5module.h
@@ -11,9 +11,9 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Purpose: This file contains declarations which define macros for the
- * H5 package. Including this header means that the source file
- * is part of the H5 package.
+ * Purpose: This file contains declarations which define macros for the
+ * H5 package. Including this header means that the source file
+ * is part of the H5 package.
*/
#ifndef H5module_H
#define H5module_H
diff --git a/src/H5mpi.c b/src/H5mpi.c
index aea0104..f5d709a 100644
--- a/src/H5mpi.c
+++ b/src/H5mpi.c
@@ -549,4 +549,237 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5_mpio_create_large_type() */
+/*-------------------------------------------------------------------------
+ * Function: H5_mpio_gatherv_alloc
+ *
+ * Purpose: A wrapper around MPI_(All)gatherv that performs allocation
+ * of the receive buffer on the caller's behalf. This
+ * routine's parameters are as follows:
+ *
+ * `send_buf` - The buffer that data will be sent from for
+ * the calling MPI rank. Analogous to
+ * MPI_(All)gatherv's `sendbuf` parameter.
+ *
+ * `send_count` - The number of `send_type` elements in the
+ * send buffer. Analogous to MPI_(All)gatherv's
+ * `sendcount` parameter.
+ *
+ * `send_type` - The MPI Datatype of the elements in the send
+ * buffer. Analogous to MPI_(All)gatherv's
+ * `sendtype` parameter.
+ *
+ * `recv_counts` - An array containing the number of elements
+ * to be received from each MPI rank.
+ *                              Analogous to MPI_(All)gatherv's `recvcounts`
+ * parameter.
+ *
+ * `displacements` - An array containing the displacements
+ * in the receive buffer where data from
+ * each MPI rank should be placed. Analogous
+ * to MPI_(All)gatherv's `displs` parameter.
+ *
+ * `recv_type` - The MPI Datatype of the elements in the
+ * receive buffer. Analogous to
+ * MPI_(All)gatherv's `recvtype` parameter.
+ *
+ * `allgather` - Specifies whether the gather operation to be
+ * performed should be MPI_Allgatherv (TRUE) or
+ * MPI_Gatherv (FALSE).
+ *
+ * `root` - For MPI_Gatherv operations, specifies the rank
+ * that will receive the data sent by other ranks.
+ * Analogous to MPI_Gatherv's `root` parameter. For
+ * MPI_Allgatherv operations, this parameter is
+ * ignored.
+ *
+ * `comm` - Specifies the MPI Communicator for the operation.
+ * Analogous to MPI_(All)gatherv's `comm` parameter.
+ *
+ * `mpi_rank` - Specifies the calling rank's rank value, as
+ * obtained by calling MPI_Comm_rank on the
+ * MPI Communicator `comm`.
+ *
+ * `mpi_size` - Specifies the MPI Communicator size, as
+ * obtained by calling MPI_Comm_size on the
+ * MPI Communicator `comm`.
+ *
+ * `out_buf` - Resulting buffer that is allocated and
+ * returned to the caller after data has been
+ * gathered into it. Returned only to the rank
+ * specified by `root` for MPI_Gatherv
+ * operations, or to all ranks for
+ * MPI_Allgatherv operations.
+ *
+ * `out_buf_num_entries` - The number of elements in the
+ * resulting buffer, in terms of
+ * the MPI Datatype provided for
+ * `recv_type`.
+ *
+ * Notes: This routine is collective across `comm`.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5_mpio_gatherv_alloc(void *send_buf, int send_count, MPI_Datatype send_type, const int recv_counts[],
+ const int displacements[], MPI_Datatype recv_type, hbool_t allgather, int root,
+ MPI_Comm comm, int mpi_rank, int mpi_size, void **out_buf, size_t *out_buf_num_entries)
+{
+ size_t recv_buf_num_entries = 0;
+ void * recv_buf = NULL;
+#if H5_CHECK_MPI_VERSION(3, 0)
+ MPI_Count type_lb;
+ MPI_Count type_extent;
+#else
+ MPI_Aint type_lb;
+ MPI_Aint type_extent;
+#endif
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(send_buf || send_count == 0);
+ if (allgather || (mpi_rank == root))
+ HDassert(out_buf && out_buf_num_entries);
+
+ /* Retrieve the extent of the MPI Datatype being used */
+#if H5_CHECK_MPI_VERSION(3, 0)
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_get_extent_x(recv_type, &type_lb, &type_extent)))
+#else
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_get_extent(recv_type, &type_lb, &type_extent)))
+#endif
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_get_extent(_x) failed", mpi_code)
+
+ if (type_extent < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "MPI recv_type had a negative extent")
+
+ /*
+ * Calculate the total size of the buffer being
+ * returned and allocate it
+ */
+ if (allgather || (mpi_rank == root)) {
+ size_t i;
+ size_t buf_size;
+
+ for (i = 0, recv_buf_num_entries = 0; i < (size_t)mpi_size; i++)
+ recv_buf_num_entries += (size_t)recv_counts[i];
+ buf_size = recv_buf_num_entries * (size_t)type_extent;
+
+        /* If our buffer size is 0, skip allocation. NOTE(review): this early return means the root rank skips MPI_Gatherv below while non-root ranks (which never compute buf_size) still call it -- confirm the collective calls stay matched */
+ if (buf_size == 0)
+ HGOTO_DONE(SUCCEED)
+
+ if (NULL == (recv_buf = H5MM_malloc(buf_size)))
+ /* Push an error, but still participate in collective gather operation */
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "couldn't allocate receive buffer")
+ }
+
+ /* Perform gather operation */
+ if (allgather) {
+ if (MPI_SUCCESS != (mpi_code = MPI_Allgatherv(send_buf, send_count, send_type, recv_buf, recv_counts,
+ displacements, recv_type, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Allgatherv failed", mpi_code)
+ }
+ else {
+ if (MPI_SUCCESS != (mpi_code = MPI_Gatherv(send_buf, send_count, send_type, recv_buf, recv_counts,
+ displacements, recv_type, root, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Gatherv failed", mpi_code)
+ }
+
+ if (allgather || (mpi_rank == root)) {
+ *out_buf = recv_buf;
+ *out_buf_num_entries = recv_buf_num_entries;
+ }
+
+done:
+ if (ret_value < 0) {
+ if (recv_buf)
+ H5MM_free(recv_buf);
+ }
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5_mpio_gatherv_alloc() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5_mpio_gatherv_alloc_simple
+ *
+ * Purpose: A slightly simplified interface to H5_mpio_gatherv_alloc
+ * which calculates the receive counts and receive buffer
+ * displacements for the caller.
+ *
+ * Notes: This routine is collective across `comm`.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5_mpio_gatherv_alloc_simple(void *send_buf, int send_count, MPI_Datatype send_type, MPI_Datatype recv_type,
+ hbool_t allgather, int root, MPI_Comm comm, int mpi_rank, int mpi_size,
+ void **out_buf, size_t *out_buf_num_entries)
+{
+ int * recv_counts_disps_array = NULL;
+ int mpi_code;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(send_buf || send_count == 0);
+ if (allgather || (mpi_rank == root))
+ HDassert(out_buf && out_buf_num_entries);
+
+ /*
+ * Allocate array to store the receive counts of each rank, as well as
+ * the displacements into the final array where each rank will place
+ * their data. The first half of the array contains the receive counts
+ * (in rank order), while the latter half contains the displacements
+ * (also in rank order).
+ */
+ if (allgather || (mpi_rank == root)) {
+ if (NULL ==
+ (recv_counts_disps_array = H5MM_malloc(2 * (size_t)mpi_size * sizeof(*recv_counts_disps_array))))
+ /* Push an error, but still participate in collective gather operation */
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL,
+ "couldn't allocate receive counts and displacements array")
+ }
+
+ /* Collect each rank's send count to interested ranks */
+ if (allgather) {
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Allgather(&send_count, 1, MPI_INT, recv_counts_disps_array, 1, MPI_INT, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Allgather failed", mpi_code)
+ }
+ else {
+ if (MPI_SUCCESS !=
+ (mpi_code = MPI_Gather(&send_count, 1, MPI_INT, recv_counts_disps_array, 1, MPI_INT, root, comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
+ }
+
+ /* Set the displacements into the receive buffer for the gather operation */
+ if (allgather || (mpi_rank == root)) {
+ size_t i;
+ int * displacements_ptr;
+
+ displacements_ptr = &recv_counts_disps_array[mpi_size];
+
+ *displacements_ptr = 0;
+ for (i = 1; i < (size_t)mpi_size; i++)
+ displacements_ptr[i] = displacements_ptr[i - 1] + recv_counts_disps_array[i - 1];
+ }
+
+ /* Perform gather operation */
+ if (H5_mpio_gatherv_alloc(send_buf, send_count, send_type, recv_counts_disps_array,
+ &recv_counts_disps_array[mpi_size], recv_type, allgather, root, comm, mpi_rank,
+ mpi_size, out_buf, out_buf_num_entries) < 0)
+ HGOTO_ERROR(H5E_LIB, H5E_CANTGATHER, FAIL, "can't gather data")
+
+done:
+ if (recv_counts_disps_array)
+ H5MM_free(recv_counts_disps_array);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5_mpio_gatherv_alloc_simple() */
+
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5private.h b/src/H5private.h
index 765e7b6..d98d76d 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -44,10 +44,12 @@
#include <sys/time.h>
#endif
#ifdef H5_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
#ifdef H5_HAVE_PWD_H
#include <pwd.h>
#endif
-#include <unistd.h>
+#ifdef H5_HAVE_WAITPID
#include <sys/wait.h>
#endif
@@ -170,7 +172,7 @@
*/
#define BEGIN_MPE_LOG \
if (H5_MPEinit_g) { \
- sprintf(p_event_start, "start %s", __func__); \
+ snprintf(p_event_start, sizeof(p_event_start), "start %s", __func__); \
if (eventa(__func__) == -1 && eventb(__func__) == -1) { \
const char *p_color = "red"; \
eventa(__func__) = MPE_Log_get_event_number(); \
@@ -385,6 +387,25 @@
#define HSSIZET_MAX ((hssize_t)LLONG_MAX)
#define HSSIZET_MIN (~(HSSIZET_MAX))
+#ifdef H5_HAVE_PARALLEL
+
+/* Define a type for safely sending size_t values with MPI */
+#if SIZE_MAX == UCHAR_MAX
+#define H5_SIZE_T_AS_MPI_TYPE MPI_UNSIGNED_CHAR
+#elif SIZE_MAX == USHRT_MAX
+#define H5_SIZE_T_AS_MPI_TYPE MPI_UNSIGNED_SHORT
+#elif SIZE_MAX == UINT_MAX
+#define H5_SIZE_T_AS_MPI_TYPE MPI_UNSIGNED
+#elif SIZE_MAX == ULONG_MAX
+#define H5_SIZE_T_AS_MPI_TYPE MPI_UNSIGNED_LONG
+#elif SIZE_MAX == ULLONG_MAX
+#define H5_SIZE_T_AS_MPI_TYPE MPI_UNSIGNED_LONG_LONG
+#else
+#error "no suitable MPI type for size_t"
+#endif
+
+#endif /* H5_HAVE_PARALLEL */
+
/*
* Types and max sizes for POSIX I/O.
* OS X (Darwin) is odd since the max I/O size does not match the types.
@@ -506,6 +527,9 @@
#define H5_GCC_CLANG_DIAG_ON(x)
#endif
+/* Function pointer typedef for qsort */
+typedef int (*H5_sort_func_cb_t)(const void *, const void *);
+
/* Typedefs and functions for timing certain parts of the library. */
/* A set of elapsed/user/system times emitted as a time point by the
@@ -574,7 +598,7 @@ typedef off_t h5_stat_size_t;
#define HDoff_t off_t
#endif
-#/* Redefine all the POSIX and C functions. We should never see an
+/* Redefine all the POSIX and C functions. We should never see an
* undecorated POSIX or C function (or any other non-HDF5 function)
* in the source.
*/
@@ -1130,57 +1154,6 @@ H5_DLL H5_ATTR_CONST int Nflock(int fd, int operation);
#ifndef HDprintf
#define HDprintf printf /*varargs*/
#endif
-#ifndef HDpthread_attr_destroy
-#define HDpthread_attr_destroy(A) pthread_attr_destroy(A)
-#endif
-#ifndef HDpthread_attr_init
-#define HDpthread_attr_init(A) pthread_attr_init(A)
-#endif
-#ifndef HDpthread_attr_setscope
-#define HDpthread_attr_setscope(A, S) pthread_attr_setscope(A, S)
-#endif
-#ifndef HDpthread_cond_init
-#define HDpthread_cond_init(C, A) pthread_cond_init(C, A)
-#endif
-#ifndef HDpthread_cond_signal
-#define HDpthread_cond_signal(C) pthread_cond_signal(C)
-#endif
-#ifndef HDpthread_cond_wait
-#define HDpthread_cond_wait(C, M) pthread_cond_wait(C, M)
-#endif
-#ifndef HDpthread_create
-#define HDpthread_create(R, A, F, U) pthread_create(R, A, F, U)
-#endif
-#ifndef HDpthread_equal
-#define HDpthread_equal(T1, T2) pthread_equal(T1, T2)
-#endif
-#ifndef HDpthread_getspecific
-#define HDpthread_getspecific(K) pthread_getspecific(K)
-#endif
-#ifndef HDpthread_join
-#define HDpthread_join(T, V) pthread_join(T, V)
-#endif
-#ifndef HDpthread_key_create
-#define HDpthread_key_create(K, D) pthread_key_create(K, D)
-#endif
-#ifndef HDpthread_mutex_init
-#define HDpthread_mutex_init(M, A) pthread_mutex_init(M, A)
-#endif
-#ifndef HDpthread_mutex_lock
-#define HDpthread_mutex_lock(M) pthread_mutex_lock(M)
-#endif
-#ifndef HDpthread_mutex_unlock
-#define HDpthread_mutex_unlock(M) pthread_mutex_unlock(M)
-#endif
-#ifndef HDpthread_self
-#define HDpthread_self() pthread_self()
-#endif
-#ifndef HDpthread_setcancelstate
-#define HDpthread_setcancelstate(N, O) pthread_setcancelstate(N, O)
-#endif
-#ifndef HDpthread_setspecific
-#define HDpthread_setspecific(K, V) pthread_setspecific(K, V)
-#endif
#ifndef HDputc
#define HDputc(C, F) putc(C, F)
#endif
@@ -1766,6 +1739,15 @@ typedef struct H5_debug_t {
} H5_debug_t;
#ifdef H5_HAVE_PARALLEL
+
+/*
+ * Check that the MPI library version is at least version
+ * `mpi_version` and subversion `mpi_subversion`
+ */
+#define H5_CHECK_MPI_VERSION(mpi_version, mpi_subversion) \
+ ((MPI_VERSION > (mpi_version)) || \
+ ((MPI_VERSION == (mpi_version)) && (MPI_SUBVERSION >= (mpi_subversion))))
+
extern hbool_t H5_coll_api_sanity_check_g;
#endif /* H5_HAVE_PARALLEL */
@@ -2006,6 +1988,14 @@ extern hbool_t H5_libterm_g; /* Is the library being shutdown? */
#endif /* H5_HAVE_THREADSAFE */
+/* Extern global to determine if we should use selection I/O if available (this
+ * variable should be removed once selection I/O performs as well as the
+ * previous scalar I/O implementation
+ *
+ * NOTE: Must be exposed via H5_DLLVAR so parallel tests pass on Windows.
+ */
+H5_DLLVAR hbool_t H5_use_selection_io_g;
+
#ifdef H5_HAVE_CODESTACK
/* Include required function stack header */
@@ -2500,6 +2490,16 @@ H5_DLL herr_t H5CX_pop(hbool_t update_dxpl_props);
#define HDcompile_assert(e) do { typedef struct { unsigned int b: (e); } x; } while(0)
*/
+/* Private typedefs */
+
+/* Union for const/non-const pointer for use by functions that manipulate
+ * pointers but do not write to their targets or return pointers to const
+ * specified locations. This helps us avoid compiler warnings. */
+typedef union {
+ void * vp;
+ const void *cvp;
+} H5_flexible_const_ptr_t;
+
/* Private functions, not part of the publicly documented API */
H5_DLL herr_t H5_init_library(void);
H5_DLL void H5_term_library(void);
@@ -2600,7 +2600,8 @@ struct h5_long_options {
*/
};
-H5_DLL int H5_get_option(int argc, const char **argv, const char *opt, const struct h5_long_options *l_opt);
+H5_DLL int H5_get_option(int argc, const char *const *argv, const char *opt,
+ const struct h5_long_options *l_opt);
#ifdef H5_HAVE_PARALLEL
/* Generic MPI functions */
@@ -2614,6 +2615,14 @@ H5_DLL herr_t H5_mpi_comm_cmp(MPI_Comm comm1, MPI_Comm comm2, int *result);
H5_DLL herr_t H5_mpi_info_cmp(MPI_Info info1, MPI_Info info2, int *result);
H5_DLL herr_t H5_mpio_create_large_type(hsize_t num_elements, MPI_Aint stride_bytes, MPI_Datatype old_type,
MPI_Datatype *new_type);
+H5_DLL herr_t H5_mpio_gatherv_alloc(void *send_buf, int send_count, MPI_Datatype send_type,
+ const int recv_counts[], const int displacements[],
+ MPI_Datatype recv_type, hbool_t allgather, int root, MPI_Comm comm,
+ int mpi_rank, int mpi_size, void **out_buf, size_t *out_buf_num_entries);
+H5_DLL herr_t H5_mpio_gatherv_alloc_simple(void *send_buf, int send_count, MPI_Datatype send_type,
+ MPI_Datatype recv_type, hbool_t allgather, int root, MPI_Comm comm,
+ int mpi_rank, int mpi_size, void **out_buf,
+ size_t *out_buf_num_entries);
#endif /* H5_HAVE_PARALLEL */
/* Functions for debugging */
diff --git a/src/H5public.h b/src/H5public.h
index 6a3911c..3f9848a 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -83,7 +83,7 @@
/**
* For tweaks, bug-fixes, or development
*/
-#define H5_VERS_RELEASE 1
+#define H5_VERS_RELEASE 2
/**
* For pre-releases like \c snap0. Empty string for official releases.
*/
@@ -91,7 +91,7 @@
/**
* Full version string
*/
-#define H5_VERS_INFO "HDF5 library version: 1.13.1-1"
+#define H5_VERS_INFO "HDF5 library version: 1.13.2-1"
#define H5check() H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE)
@@ -289,6 +289,11 @@ typedef long long ssize_t;
* \internal Defined as a (minimum) 64-bit integer type.
*/
typedef uint64_t hsize_t;
+
+#ifdef H5_HAVE_PARALLEL
+#define HSIZE_AS_MPI_TYPE MPI_UINT64_T
+#endif
+
/**
* The size of file objects. Used when negative values are needed to indicate errors.
*
@@ -323,7 +328,7 @@ typedef uint64_t haddr_t;
#define HADDR_MAX (HADDR_UNDEF - 1)
#ifdef H5_HAVE_PARALLEL
-#define HADDR_AS_MPI_TYPE MPI_LONG_LONG_INT
+#define HADDR_AS_MPI_TYPE MPI_UINT64_T
#endif
//! <!-- [H5_iter_order_t_snip] -->
diff --git a/src/H5system.c b/src/H5system.c
index 9a966b0..a369e3d 100644
--- a/src/H5system.c
+++ b/src/H5system.c
@@ -862,7 +862,7 @@ H5_nanosleep(uint64_t nanosec)
#else
- const uint64_t nanosec_per_sec = 1000 * 1000 * 1000;
+ const uint64_t nanosec_per_sec = 1000 * 1000L * 1000;
struct timespec sleeptime; /* Struct to hold time to sleep */
/* Set up time to sleep
@@ -956,7 +956,7 @@ const char *H5_optarg; /* Flag argument (or value) */
*-------------------------------------------------------------------------
*/
int
-H5_get_option(int argc, const char **argv, const char *opts, const struct h5_long_options *l_opts)
+H5_get_option(int argc, const char *const *argv, const char *opts, const struct h5_long_options *l_opts)
{
static int sp = 1; /* character index in current token */
int optchar = '?'; /* option character passed back to user */
@@ -1033,7 +1033,7 @@ H5_get_option(int argc, const char **argv, const char *opts, const struct h5_lon
HDfree(arg);
}
else {
- register char *cp; /* pointer into current token */
+ char *cp; /* pointer into current token */
/* short command line option */
optchar = argv[H5_optind][sp];
diff --git a/src/H5timer.c b/src/H5timer.c
index b2cc5f0..b5dba97 100644
--- a/src/H5timer.c
+++ b/src/H5timer.c
@@ -193,17 +193,26 @@ H5_now_usec(void)
struct timespec ts;
HDclock_gettime(CLOCK_MONOTONIC, &ts);
- now = (uint64_t)(ts.tv_sec * (1000 * 1000)) + (uint64_t)(ts.tv_nsec / 1000);
+
+ /* Cast all values in this expression to uint64_t to ensure that all intermediate
+ * calculations are done in 64 bit, to prevent overflow */
+ now = ((uint64_t)ts.tv_sec * ((uint64_t)1000 * (uint64_t)1000)) +
+ ((uint64_t)ts.tv_nsec / (uint64_t)1000);
}
#elif defined(H5_HAVE_GETTIMEOFDAY)
{
struct timeval now_tv;
HDgettimeofday(&now_tv, NULL);
- now = (uint64_t)(now_tv.tv_sec * (1000 * 1000)) + (uint64_t)now_tv.tv_usec;
+
+ /* Cast all values in this expression to uint64_t to ensure that all intermediate
+ * calculations are done in 64 bit, to prevent overflow */
+ now = ((uint64_t)now_tv.tv_sec * ((uint64_t)1000 * (uint64_t)1000)) + (uint64_t)now_tv.tv_usec;
}
#else /* H5_HAVE_GETTIMEOFDAY */
- now = (uint64_t)(HDtime(NULL) * (1000 * 1000));
+ /* Cast all values in this expression to uint64_t to ensure that all intermediate calculations
+ * are done in 64 bit, to prevent overflow */
+ now = ((uint64_t)HDtime(NULL) * ((uint64_t)1000 * (uint64_t)1000));
#endif /* H5_HAVE_GETTIMEOFDAY */
return (now);
diff --git a/src/H5trace.c b/src/H5trace.c
index baf6a10..8790a88 100644
--- a/src/H5trace.c
+++ b/src/H5trace.c
@@ -1248,7 +1248,7 @@ H5_trace_args(H5RS_str_t *rs, const char *type, va_list ap)
{
H5FD_class_t cls = HDva_arg(ap, H5FD_class_t);
- H5RS_asprintf_cat(rs, "{'%s', " H5_PRINTF_HADDR_FMT ", ", cls.name, cls.maxaddr);
+ H5RS_asprintf_cat(rs, "{'%s', %" PRIuHADDR ", ", cls.name, cls.maxaddr);
H5_trace_args_close_degree(rs, cls.fc_degree);
H5RS_acat(rs, ", ...}");
} /* end block */
@@ -4023,7 +4023,7 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
H5_timer_get_times(function_timer, &function_times);
H5_timer_get_times(running_timer, &running_times);
- HDsprintf(tmp, "%.6f", (function_times.elapsed - running_times.elapsed));
+ HDsnprintf(tmp, sizeof(tmp), "%.6f", (function_times.elapsed - running_times.elapsed));
H5RS_asprintf_cat(rs, " %*s ", (int)HDstrlen(tmp), "");
}
for (i = 0; i < current_depth; i++)
diff --git a/src/Makefile.am b/src/Makefile.am
index c4023ae..edfd9b0 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -74,12 +74,11 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5lib_settings.c H5system.c \
H5HFspace.c H5HFstat.c H5HFtest.c H5HFtiny.c \
H5HG.c H5HGcache.c H5HGdbg.c H5HGquery.c \
H5HL.c H5HLcache.c H5HLdbg.c H5HLint.c H5HLprfx.c H5HLdblk.c \
- H5HP.c \
H5I.c H5Idbg.c H5Iint.c H5Itest.c \
H5L.c H5Ldeprec.c H5Lexternal.c H5Lint.c \
H5M.c \
H5MF.c H5MFaggr.c H5MFdbg.c H5MFsection.c \
- H5MM.c H5MP.c H5MPtest.c \
+ H5MM.c \
H5O.c H5Odeprec.c H5Oainfo.c H5Oalloc.c H5Oattr.c H5Oattribute.c \
H5Obogus.c H5Obtreek.c H5Ocache.c H5Ocache_image.c H5Ochunk.c \
H5Ocont.c H5Ocopy.c H5Ocopy_ref.c H5Odbg.c H5Odrvinfo.c H5Odtype.c \
diff --git a/test/AtomicWriterReader.txt b/test/AtomicWriterReader.txt
index dc0a3bd..064ba39 100644
--- a/test/AtomicWriterReader.txt
+++ b/test/AtomicWriterReader.txt
@@ -11,7 +11,7 @@ atomic_reader.c: is the "read" part of the test.
Building the Tests
------------------
-The two test parts are automically built during configure and make process.
+The two test parts are automatically built during the configure and make process.
But to build them individually, you can do in test/ directory:
$ gcc atomic_writer
$ gcc atomic_reader
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 4e543fd..f9e6e4b 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -266,7 +266,6 @@ set (testhdf5_SOURCES
${HDF5_TEST_SOURCE_DIR}/tgenprop.c
${HDF5_TEST_SOURCE_DIR}/th5o.c
${HDF5_TEST_SOURCE_DIR}/th5s.c
- ${HDF5_TEST_SOURCE_DIR}/theap.c
${HDF5_TEST_SOURCE_DIR}/tid.c
${HDF5_TEST_SOURCE_DIR}/titerate.c
${HDF5_TEST_SOURCE_DIR}/tmeta.c
@@ -318,7 +317,6 @@ set (H5_TESTS
earray
btree2
fheap
- pool
accum
hyperslab
istore
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index b2ed8e8..a534e60 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -205,18 +205,12 @@ add_test (
)
set_tests_properties (H5TEST-testhdf5-clear-objects PROPERTIES FIXTURES_SETUP clear_testhdf5)
-add_test (NAME H5TEST-testhdf5-base COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:testhdf5> -x heap -x file -x select)
+add_test (NAME H5TEST-testhdf5-base COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:testhdf5> -x file -x select)
set_tests_properties (H5TEST-testhdf5-base PROPERTIES
FIXTURES_REQUIRED clear_testhdf5
ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
-add_test (NAME H5TEST-testhdf5-heap COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:testhdf5> -o heap)
-set_tests_properties (H5TEST-testhdf5-heap PROPERTIES
- FIXTURES_REQUIRED clear_testhdf5
- ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
- WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
-)
add_test (NAME H5TEST-testhdf5-file COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:testhdf5> -o file)
set_tests_properties (H5TEST-testhdf5-file PROPERTIES
FIXTURES_REQUIRED clear_testhdf5
@@ -649,10 +643,11 @@ set_tests_properties (H5TEST-tcheck_version-minor PROPERTIES
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
WILL_FAIL "true"
)
-# release + 1 should pass
+# release + 1 should pass on non-develop branches
add_test (NAME H5TEST-tcheck_version-release COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:tcheck_version> "-tr")
set_tests_properties (H5TEST-tcheck_version-release PROPERTIES
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
+ WILL_FAIL "true"
)
##############################################################################
@@ -673,18 +668,18 @@ set_tests_properties (H5TEST-tcheck_version-release PROPERTIES
# flushrefresh
##############################################################################
# autotools script tests
-# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
-# NOT CONVERTED accum_swmr_reader is used by accum.c.
-# NOT CONVERTED atomic_writer and atomic_reader are standalone programs.
-# links_env is used by testlinks_env.sh
-# filenotclosed and del_many_dense_attrs are used by testabort_fail.sh
-# NOT CONVERTED flushrefresh is used by testflushrefresh.sh.
-# NOT CONVERTED use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
-# NOT CONVERTED swmr_* files (besides swmr.c) are used by testswmr.sh.
-# NOT CONVERTED vds_swmr_* files are used by testvdsswmr.sh
-# NOT CONVERTED 'make check' doesn't run them directly, so they are not included in TEST_PROG.
-# NOT CONVERTED Also build testmeta, which is used for timings test. It builds quickly,
-# NOT CONVERTED and this lets automake keep all its test programs in one place.
+# error_test and err_compat are built at the same time as the other tests, but executed by test_error.sh
+# NOT CONVERTED accum_swmr_reader is used by accum.c
+# NOT CONVERTED atomic_writer and atomic_reader are stand-alone programs
+# links_env is used by test_links_env.sh
+# filenotclosed and del_many_dense_attrs are used by test_abort_fail.sh
+# NOT CONVERTED flushrefresh is used by test_flush_refresh.sh
+# NOT CONVERTED use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_use_cases.sh
+# NOT CONVERTED swmr_* files (besides swmr.c) are used by test_swmr.sh
+# NOT CONVERTED vds_swmr_* files are used by test_vds_swmr.sh
+# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
+# Also build testmeta, which is used for timings test. It builds quickly
+# and this lets automake keep all its test programs in one place.
##############################################################################
#-- Adding test for filenotclosed
@@ -852,16 +847,6 @@ if (BUILD_SHARED_LIBS)
ENVIRONMENT "HDF5_PLUGIN_PATH=${CMAKE_BINARY_DIR}/filter_plugin_dir1${CMAKE_SEP}${CMAKE_BINARY_DIR}/filter_plugin_dir2;srcdir=${HDF5_TEST_BINARY_DIR}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}
)
-
-##############################################################################
-# HDFFV-9655 relative plugin test disabled
-#
-# add_test (NAME H5PLUGIN-pluginRelative COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} $<TARGET_FILE:plugin>)
-# set_tests_properties (H5PLUGIN-pluginRelative PROPERTIES
-# ENVIRONMENT "HDF5_PLUGIN_PATH=@/${BIN_REL_PATH}testdir1${CMAKE_SEP}@/${BIN_REL_PATH}testdir2;srcdir=${HDF5_TEST_BINARY_DIR}"
-# WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}
-# )
-##############################################################################
endif ()
option (TEST_SHELL_SCRIPTS "Enable shell script tests" ON)
@@ -874,10 +859,10 @@ if (ENABLE_EXTENDED_TESTS)
##############################################################################
### S W M R T E S T S
##############################################################################
-# testflushrefresh.sh: flushrefresh
-# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
-# testswmr.sh: swmr*
-# testvdsswmr.sh: vds_swmr*
+# test_flush_refresh.sh: flushrefresh
+# test_use_cases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# test_swmr.sh: swmr*
+# test_vds_swmr.sh: vds_swmr*
#-- Adding test for flushrefresh
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/flushrefresh_test")
diff --git a/test/Makefile.am b/test/Makefile.am
index ff09003..fcfd34e 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -22,21 +22,23 @@ include $(top_srcdir)/config/commence.am
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src
# Test scripts--
-# testerror.sh: err_compat, error_test
-# testlibinfo.sh:
-# testcheck_version.sh: tcheck_version
-# testlinks_env.sh: links_env
-# testexternal_env.sh: external_env
-# testflushrefresh.sh: flushrefresh
-# testvds_env.sh: vds_env
-# testswmr.sh: swmr*
-# testvdsswmr.sh: vds_swmr*
-# testabort_fail.sh: filenotclosed.c and del_many_dense_attrs.c
-# test_filter_plugin.sh: filter_plugin.c
+# test_abort_fail.sh: filenotclosed.c and del_many_dense_attrs.c
+# test_check_version.sh: tcheck_version
+# test_error.sh: err_compat, error_test
+# test_external_env.sh: external_env
+# test_flush_refresh.sh: flushrefresh
+# test_libinfo.sh:
+# test_links_env.sh: links_env
# test_mirror.sh: mirror_vfd ../utils/mirror_vfd/*
-# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh testexternal_env.sh \
- testswmr.sh testvds_env.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh testabort_fail.sh
+# test_plugin.sh: filter_plugin.c vfd_plugin.c vol_plugin.c
+# test_swmr.sh: swmr*
+# test_vds_env.sh: vds_env
+# test_vds_swmr.sh: vds_swmr*
+# test_use_cases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+TEST_SCRIPT = test_abort_fail.sh test_check_version.sh test_error.sh \
+ test_flush_refresh.sh test_external_env.sh test_libinfo.sh \
+ test_links_env.sh test_swmr.sh test_vds_env.sh test_vds_swmr.sh \
+ test_use_cases.sh
SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
external_env$(EXEEXT) filenotclosed$(EXEEXT) del_many_dense_attrs$(EXEEXT) \
flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \
@@ -45,10 +47,9 @@ SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT) swmr_start_write$(EXEEXT) \
vds_env$(EXEEXT) vds_swmr_gen$(EXEEXT) vds_swmr_reader$(EXEEXT) vds_swmr_writer$(EXEEXT)
if HAVE_SHARED_CONDITIONAL
- TEST_SCRIPT += test_filter_plugin.sh test_vol_plugin.sh
- SCRIPT_DEPEND += filter_plugin$(EXEEXT) vol_plugin$(EXEEXT)
+ TEST_SCRIPT += test_plugin.sh
+  SCRIPT_DEPEND += filter_plugin$(EXEEXT) vfd_plugin$(EXEEXT) vol_plugin$(EXEEXT)
endif
-
if MIRROR_VFD_CONDITIONAL
TEST_SCRIPT += test_mirror.sh
endif
@@ -62,7 +63,7 @@ check_SCRIPTS = $(TEST_SCRIPT)
TEST_PROG= testhdf5 \
cache cache_api cache_image cache_tagging lheap ohdr \
stab gheap evict_on_close farray earray btree2 fheap \
- pool accum hyperslab istore bittests dt_arith page_buffer \
+ accum hyperslab istore bittests dt_arith page_buffer \
dtypes dsets chunk_info cmpd_dset cmpd_dtransform filter_fail extend direct_chunk \
external efc objcopy objcopy_ref links unlink twriteorder big mtime \
fillval mount \
@@ -71,22 +72,25 @@ TEST_PROG= testhdf5 \
dangle dtransform reserved cross_read freespace mf vds file_image \
unregister cache_logging cork swmr thread_id vol timer event_set
-# List programs to be built when testing here.
-# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
-# tcheck_version is used by testcheck_version.sh.
-# accum_swmr_reader is used by accum.c.
-# atomic_writer and atomic_reader are standalone programs.
-# links_env is used by testlinks_env.sh
-# external_env is used by testexternal_env.sh
-# filenotclosed and del_many_dense_attrs are used by testabort_fail.sh
-# flushrefresh is used by testflushrefresh.sh.
-# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
-# swmr_* files (besides swmr.c) are used by testswmr.sh.
-# vds_swmr_* files are used by testvdsswmr.sh
-# vds_env is used by testvds_env.sh
+# List programs to be built when testing here
+#
+# error_test and err_compat are built at the same time as the other tests, but executed by test_error.sh
+# tcheck_version is used by test_check_version.sh
+# accum_swmr_reader is used by accum.c
+# atomic_writer and atomic_reader are stand-alone programs
+# links_env is used by test_links_env.sh
+# external_env is used by test_external_env.sh
+# filenotclosed and del_many_dense_attrs are used by test_abort_fail.sh
+# flushrefresh is used by test_flush_refresh.sh
+# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_use_cases.sh
+# swmr_* files (besides swmr.c) are used by test_swmr.sh
+# vds_swmr_* files are used by test_vds_swmr.sh
+# vds_env is used by test_vds_env.sh
# mirror_vfd is used by test_mirror.sh
+#
# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
-# Also build testmeta, which is used for timings test. It builds quickly,
+#
+# Also build testmeta, which is used for the timing test. It builds quickly
# and this lets automake keep all its test programs in one place.
check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
testmeta accum_swmr_reader atomic_writer atomic_reader external_env \
@@ -98,7 +102,6 @@ check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
if HAVE_SHARED_CONDITIONAL
check_PROGRAMS+= filter_plugin vfd_plugin vol_plugin
endif
-
if MIRROR_VFD_CONDITIONAL
check_PROGRAMS+= mirror_vfd
endif
@@ -227,7 +230,7 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
# Sources for testhdf5 executable
testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
- tgenprop.c th5o.c th5s.c tcoords.c theap.c tid.c titerate.c tmeta.c tmisc.c \
+ tgenprop.c th5o.c th5s.c tcoords.c tid.c titerate.c tmeta.c tmisc.c \
trefer.c trefer_deprec.c trefstr.c tselect.c tskiplist.c tsohm.c ttime.c tunicode.c \
tvlstr.c tvltypes.c
diff --git a/test/ShellTests.cmake b/test/ShellTests.cmake
index e7de735..140da6f 100644
--- a/test/ShellTests.cmake
+++ b/test/ShellTests.cmake
@@ -17,21 +17,21 @@
find_program (PWSH NAMES pwsh powershell)
if (PWSH)
- file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/usecases_test")
+ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/use_cases_test")
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/swmr_test")
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST/vds_swmr_test")
set (srcdir ${HDF5_TEST_SOURCE_DIR})
set (H5_UTILS_TEST_BUILDDIR ${CMAKE_TEST_OUTPUT_DIRECTORY})
set (H5_TEST_BUILDDIR ${HDF5_TEST_BINARY_DIR}/H5TEST)
- configure_file(${HDF5_TEST_SOURCE_DIR}/testswmr.pwsh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.ps1 @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_swmr.pwsh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_swmr.ps1 @ONLY)
# test commented out as currently the programs are not allowing another access to the data file
#add_test (H5SHELL-testswmr ${PWSH} ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.ps1)
#set_tests_properties (H5SHELL-testswmr PROPERTIES
# ENVIRONMENT "PATH=$ENV{PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
# WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
#)
- configure_file(${HDF5_TEST_SOURCE_DIR}/testvdsswmr.pwsh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.ps1 @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_vds_swmr.pwsh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_vds_swmr.ps1 @ONLY)
# test commented out as currently the programs are not allowing another access to the data file
#add_test (H5SHELL-testvdsswmr ${PWSH} ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.ps1)
#set_tests_properties (H5SHELL-testvdsswmr PROPERTIES
@@ -48,11 +48,11 @@ elseif (UNIX)
# configure scripts to test dir
##############################################################################
if (H5_PERL_FOUND)
- configure_file(${HDF5_TEST_SOURCE_DIR}/testflushrefresh.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testflushrefresh.sh @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_flush_refresh.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_flush_refresh.sh @ONLY)
endif ()
- configure_file(${HDF5_TEST_SOURCE_DIR}/test_usecases.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_usecases.sh @ONLY)
- configure_file(${HDF5_TEST_SOURCE_DIR}/testswmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.sh @ONLY)
- configure_file(${HDF5_TEST_SOURCE_DIR}/testvdsswmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.sh @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_use_cases.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_use_cases.sh @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_swmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_swmr.sh @ONLY)
+ configure_file(${HDF5_TEST_SOURCE_DIR}/test_vds_swmr.sh.in ${HDF5_TEST_BINARY_DIR}/H5TEST/test_vds_swmr.sh @ONLY)
##############################################################################
# copy test programs to test dir
@@ -78,44 +78,44 @@ elseif (UNIX)
# flushrefresh
##############################################################################
# autotools script tests
- # error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
- # NOT CONVERTED accum_swmr_reader is used by accum.c.
- # NOT CONVERTED atomic_writer and atomic_reader are standalone programs.
- # links_env is used by testlinks_env.sh
- # filenotclosed and del_many_dense_attrs are used by testabort_fail.sh
- # NOT CONVERTED flushrefresh is used by testflushrefresh.sh.
- # NOT CONVERTED use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
- # NOT CONVERTED swmr_* files (besides swmr.c) are used by testswmr.sh.
- # NOT CONVERTED vds_swmr_* files are used by testvdsswmr.sh
+ # error_test and err_compat are built at the same time as the other tests, but executed by test_error.sh
+ # NOT CONVERTED accum_swmr_reader is used by accum.c
+ # NOT CONVERTED atomic_writer and atomic_reader are stand-alone programs
+ # links_env is used by test_links_env.sh
+ # filenotclosed and del_many_dense_attrs are used by test_abort_fail.sh
+ # NOT CONVERTED flushrefresh is used by test_flush_refresh.sh.
+ # NOT CONVERTED use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_use_cases.sh
+ # NOT CONVERTED swmr_* files (besides swmr.c) are used by test_swmr.sh.
+ # NOT CONVERTED vds_swmr_* files are used by test_vds_swmr.sh
# NOT CONVERTED 'make check' doesn't run them directly, so they are not included in TEST_PROG.
- # NOT CONVERTED Also build testmeta, which is used for timings test. It builds quickly,
+  # NOT CONVERTED Also build testmeta, which is used for the timing test. It builds quickly
# NOT CONVERTED and this lets automake keep all its test programs in one place.
##############################################################################
##############################################################################
### S W M R T E S T S
##############################################################################
- # testflushrefresh.sh: flushrefresh
- # test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
- # testswmr.sh: swmr*
- # testvdsswmr.sh: vds_swmr*
- add_test (H5SHELL-testflushrefresh ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testflushrefresh.sh)
- set_tests_properties (H5SHELL-testflushrefresh PROPERTIES
+ # test_flush_refresh.sh: flushrefresh
+ # test_use_cases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+ # test_swmr.sh: swmr*
+ # test_vds_swmr.sh: vds_swmr*
+ add_test (H5SHELL-test_flush_refresh ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_flush_refresh.sh)
+ set_tests_properties (H5SHELL-test_flush_refresh PROPERTIES
ENVIRONMENT "LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
- add_test (H5SHELL-test_usecases ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_usecases.sh)
- set_tests_properties (H5SHELL-test_usecases PROPERTIES
+ add_test (H5SHELL-test_use_cases ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_use_cases.sh)
+ set_tests_properties (H5SHELL-test_use_cases PROPERTIES
ENVIRONMENT "LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
- add_test (H5SHELL-testswmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testswmr.sh)
- set_tests_properties (H5SHELL-testswmr PROPERTIES
+ add_test (H5SHELL-test_swmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_swmr.sh)
+ set_tests_properties (H5SHELL-test_swmr PROPERTIES
ENVIRONMENT "LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
- add_test (H5SHELL-testvdsswmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/testvdsswmr.sh)
- set_tests_properties (H5SHELL-testvdsswmr PROPERTIES
+ add_test (H5SHELL-test_vds_swmr ${SH_PROGRAM} ${HDF5_TEST_BINARY_DIR}/H5TEST/test_vds_swmr.sh)
+ set_tests_properties (H5SHELL-test_vds_swmr PROPERTIES
ENVIRONMENT "LD_LIBRARY_PATH=$ENV{LD_LIBRARY_PATH}:${CMAKE_RUNTIME_OUTPUT_DIRECTORY}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
diff --git a/test/app_ref.c b/test/app_ref.c
index 2777dc0..c4ac60c 100644
--- a/test/app_ref.c
+++ b/test/app_ref.c
@@ -69,8 +69,10 @@ Abrt_Handler(int H5_ATTR_UNUSED sig)
{
int i, n;
+ const char *string = " ID reference count: ";
for (i = 0; i < T_NUMCLASSES; i++) {
- HDfprintf(stderr, "%s ID reference count: %n", IDNAME[i], &n);
+ HDfprintf(stderr, "%s%s", IDNAME[i], string);
+ n = (int)(strlen(IDNAME[i]) + strlen(string));
HDfprintf(stderr, "%*d\n", (n < ERR_WIDTH) ? (ERR_WIDTH - n) : 0, rc[i]);
}
}
diff --git a/test/cache_api.c b/test/cache_api.c
index 2d642a6..6e897ec 100644
--- a/test/cache_api.c
+++ b/test/cache_api.c
@@ -1082,7 +1082,7 @@ mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id)
/* create the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
diff --git a/test/cache_common.c b/test/cache_common.c
index 207daa1..9becfa8 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -2317,8 +2317,8 @@ verify_clean(void)
void
verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_entry_status expected[])
{
- static char msg[256];
- int i;
+ char msg[256];
+ int i;
i = 0;
while ((pass) && (i < num_entries)) {
@@ -2330,14 +2330,15 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if ((!expected[i].in_cache) && ((expected[i].is_protected) || (expected[i].is_pinned))) {
pass = FALSE;
- HDsprintf(msg, "%d: Contradictory data in expected[%d].\n", tag, i);
+ HDsnprintf(msg, sizeof(msg), "%d: Contradictory data in expected[%d].\n", tag, i);
failure_mssg = msg;
}
if ((!expected[i].in_cache) && (expected[i].is_dirty) && (!entry_ptr->expunged)) {
pass = FALSE;
- HDsprintf(msg, "%d: expected[%d] specs non-expunged, dirty, non-resident.\n", tag, i);
+ HDsnprintf(msg, sizeof(msg), "%d: expected[%d] specs non-expunged, dirty, non-resident.\n", tag,
+ i);
failure_mssg = msg;
}
@@ -2348,9 +2349,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (in_cache != expected[i].in_cache) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) in cache actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index, (int)in_cache,
- (int)expected[i].in_cache);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) in cache actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index, (int)in_cache,
+ (int)expected[i].in_cache);
failure_mssg = msg;
}
}
@@ -2360,9 +2361,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->size != expected[i].size) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) size actual/expected = %ld/%ld.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index, (long)(entry_ptr->size),
- (long)expected[i].size);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) size actual/expected = %ld/%ld.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index, (long)(entry_ptr->size),
+ (long)expected[i].size);
failure_mssg = msg;
}
}
@@ -2372,9 +2373,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->header.size != expected[i].size) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header size actual/expected = %ld/%ld.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (long)(entry_ptr->header.size), (long)expected[i].size);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header size actual/expected = %ld/%ld.\n",
+ tag, (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (long)(entry_ptr->header.size), (long)expected[i].size);
failure_mssg = msg;
}
}
@@ -2384,9 +2385,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->at_main_addr != expected[i].at_main_addr) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) at main addr actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->at_main_addr), (int)expected[i].at_main_addr);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) at main addr actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->at_main_addr), (int)expected[i].at_main_addr);
failure_mssg = msg;
}
}
@@ -2396,9 +2397,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->is_dirty != expected[i].is_dirty) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) is_dirty actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->is_dirty), (int)expected[i].is_dirty);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_dirty actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->is_dirty), (int)expected[i].is_dirty);
failure_mssg = msg;
}
}
@@ -2408,9 +2409,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->header.is_dirty != expected[i].is_dirty) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header is_dirty actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->header.is_dirty), (int)expected[i].is_dirty);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header is_dirty actual/expected = %d/%d.\n",
+ tag, (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_dirty), (int)expected[i].is_dirty);
failure_mssg = msg;
}
}
@@ -2420,9 +2421,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->is_protected != expected[i].is_protected) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) is_protected actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->is_protected), (int)expected[i].is_protected);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_protected actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->is_protected), (int)expected[i].is_protected);
failure_mssg = msg;
}
}
@@ -2432,9 +2433,10 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->header.is_protected != expected[i].is_protected) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header is_protected actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->header.is_protected), (int)expected[i].is_protected);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) header is_protected actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_protected), (int)expected[i].is_protected);
failure_mssg = msg;
}
}
@@ -2444,9 +2446,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->is_pinned != expected[i].is_pinned) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) is_pinned actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->is_pinned), (int)expected[i].is_pinned);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_pinned actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->is_pinned), (int)expected[i].is_pinned);
failure_mssg = msg;
}
}
@@ -2456,9 +2458,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->is_corked != expected[i].is_corked) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) is_corked actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->is_corked), (int)expected[i].is_corked);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) is_corked actual/expected = %d/%d.\n", tag,
+ (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->is_corked), (int)expected[i].is_corked);
failure_mssg = msg;
}
}
@@ -2468,9 +2470,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (entry_ptr->header.is_pinned != expected[i].is_pinned) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header is_pinned actual/expected = %d/%d.\n", tag,
- (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->header.is_pinned), (int)expected[i].is_pinned);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) header is_pinned actual/expected = %d/%d.\n",
+ tag, (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->header.is_pinned), (int)expected[i].is_pinned);
failure_mssg = msg;
}
}
@@ -2482,11 +2484,12 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
(entry_ptr->destroyed != expected[i].destroyed)) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d,%d) deserialized = %d(%d), serialized = %d(%d), dest = %d(%d)\n",
- tag, (int)expected[i].entry_type, (int)expected[i].entry_index,
- (int)(entry_ptr->deserialized), (int)(expected[i].deserialized),
- (int)(entry_ptr->serialized), (int)(expected[i].serialized),
- (int)(entry_ptr->destroyed), (int)(expected[i].destroyed));
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d,%d) deserialized = %d(%d), serialized = %d(%d), dest = %d(%d)\n",
+ tag, (int)expected[i].entry_type, (int)expected[i].entry_index,
+ (int)(entry_ptr->deserialized), (int)(expected[i].deserialized),
+ (int)(entry_ptr->serialized), (int)(expected[i].serialized),
+ (int)(entry_ptr->destroyed), (int)(expected[i].destroyed));
failure_mssg = msg;
}
}
@@ -2497,18 +2500,19 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (pass) {
if (entry_ptr->flush_dep_npar != expected[i].flush_dep_npar) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_dep_npar actual/expected = %u/%u.\n", tag,
- expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_npar,
- expected[i].flush_dep_npar);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_dep_npar actual/expected = %u/%u.\n",
+ tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_npar,
+ expected[i].flush_dep_npar);
failure_mssg = msg;
} /* end if */
} /* end if */
if ((pass) && (in_cache)) {
if (entry_ptr->header.flush_dep_nparents != expected[i].flush_dep_npar) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header flush_dep_nparents actual/expected = %u/%u.\n", tag,
- expected[i].entry_type, expected[i].entry_index,
- entry_ptr->header.flush_dep_nparents, expected[i].flush_dep_npar);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) header flush_dep_nparents actual/expected = %u/%u.\n", tag,
+ expected[i].entry_type, expected[i].entry_index,
+ entry_ptr->header.flush_dep_nparents, expected[i].flush_dep_npar);
failure_mssg = msg;
} /* end if */
} /* end if */
@@ -2519,9 +2523,10 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
for (u = 0; u < entry_ptr->flush_dep_npar; u++) {
if (entry_ptr->flush_dep_par_type[u] != expected[i].flush_dep_par_type[u]) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_dep_par_type[%u] actual/expected = %d/%d.\n", tag,
- expected[i].entry_type, expected[i].entry_index, u,
- entry_ptr->flush_dep_par_type[u], expected[i].flush_dep_par_type[u]);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) flush_dep_par_type[%u] actual/expected = %d/%d.\n", tag,
+ expected[i].entry_type, expected[i].entry_index, u,
+ entry_ptr->flush_dep_par_type[u], expected[i].flush_dep_par_type[u]);
failure_mssg = msg;
} /* end if */
} /* end for */
@@ -2530,9 +2535,10 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
for (u = 0; u < entry_ptr->flush_dep_npar; u++) {
if (entry_ptr->flush_dep_par_idx[u] != expected[i].flush_dep_par_idx[u]) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_dep_par_idx[%u] actual/expected = %d/%d.\n", tag,
- expected[i].entry_type, expected[i].entry_index, u,
- entry_ptr->flush_dep_par_idx[u], expected[i].flush_dep_par_idx[u]);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) flush_dep_par_idx[%u] actual/expected = %d/%d.\n", tag,
+ expected[i].entry_type, expected[i].entry_index, u,
+ entry_ptr->flush_dep_par_idx[u], expected[i].flush_dep_par_idx[u]);
failure_mssg = msg;
} /* end if */
} /* end for */
@@ -2542,37 +2548,39 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (pass) {
if (entry_ptr->flush_dep_nchd != expected[i].flush_dep_nchd) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_dep_nchd actual/expected = %u/%u.\n", tag,
- expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_nchd,
- expected[i].flush_dep_nchd);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_dep_nchd actual/expected = %u/%u.\n",
+ tag, expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_nchd,
+ expected[i].flush_dep_nchd);
failure_mssg = msg;
} /* end if */
} /* end if */
if ((pass) && (in_cache)) {
if (entry_ptr->header.flush_dep_nchildren != expected[i].flush_dep_nchd) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) header flush_dep_nchildren actual/expected = %u/%u.\n", tag,
- expected[i].entry_type, expected[i].entry_index,
- entry_ptr->header.flush_dep_nchildren, expected[i].flush_dep_nchd);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) header flush_dep_nchildren actual/expected = %u/%u.\n", tag,
+ expected[i].entry_type, expected[i].entry_index,
+ entry_ptr->header.flush_dep_nchildren, expected[i].flush_dep_nchd);
failure_mssg = msg;
} /* end if */
} /* end if */
if (pass) {
if (entry_ptr->flush_dep_ndirty_chd != expected[i].flush_dep_ndirty_chd) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_dep_ndirty_chd actual/expected = %u/%u.\n", tag,
- expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_ndirty_chd,
- expected[i].flush_dep_ndirty_chd);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) flush_dep_ndirty_chd actual/expected = %u/%u.\n", tag,
+ expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_dep_ndirty_chd,
+ expected[i].flush_dep_ndirty_chd);
failure_mssg = msg;
} /* end if */
} /* end if */
if ((pass) && (in_cache)) {
if (entry_ptr->header.flush_dep_ndirty_children != expected[i].flush_dep_ndirty_chd) {
pass = FALSE;
- HDsprintf(msg,
- "%d entry (%d, %d) header flush_dep_ndirty_children actual/expected = %u/%u.\n",
- tag, expected[i].entry_type, expected[i].entry_index,
- entry_ptr->header.flush_dep_ndirty_children, expected[i].flush_dep_ndirty_chd);
+ HDsnprintf(msg, sizeof(msg),
+ "%d entry (%d, %d) header flush_dep_ndirty_children actual/expected = %u/%u.\n",
+ tag, expected[i].entry_type, expected[i].entry_index,
+ entry_ptr->header.flush_dep_ndirty_children, expected[i].flush_dep_ndirty_chd);
failure_mssg = msg;
} /* end if */
} /* end if */
@@ -2581,9 +2589,9 @@ verify_entry_status(H5C_t *cache_ptr, int tag, int num_entries, struct expected_
if (pass) {
if (expected[i].flush_order >= 0 && entry_ptr->flush_order != (unsigned)expected[i].flush_order) {
pass = FALSE;
- HDsprintf(msg, "%d entry (%d, %d) flush_order actual/expected = %u/%d.\n", tag,
- expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_order,
- expected[i].flush_order);
+ HDsnprintf(msg, sizeof(msg), "%d entry (%d, %d) flush_order actual/expected = %u/%d.\n", tag,
+ expected[i].entry_type, expected[i].entry_index, entry_ptr->flush_order,
+ expected[i].flush_order);
failure_mssg = msg;
} /* end if */
} /* end if */
diff --git a/test/cache_image.c b/test/cache_image.c
index d3961a8..419eb58 100644
--- a/test/cache_image.c
+++ b/test/cache_image.c
@@ -164,7 +164,7 @@ create_datasets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
@@ -443,7 +443,7 @@ delete_datasets(hid_t file_id, int min_dset, int max_dset)
i = min_dset;
while ((pass) && (i <= max_dset)) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
if (H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
@@ -1019,7 +1019,7 @@ verify_datasets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if (dataset_ids[i] < 0) {
@@ -4624,7 +4624,7 @@ cache_image_smoke_check_5(hbool_t single_file_vfd)
/* 2) Create a process specific group. */
if (pass) {
- HDsprintf(process_group_name, "/process_%d", min_group);
+ HDsnprintf(process_group_name, sizeof(process_group_name), "/process_%d", min_group);
proc_gid = H5Gcreate2(file_id, process_group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -4728,7 +4728,7 @@ cache_image_smoke_check_5(hbool_t single_file_vfd)
if (pass) {
max_group++;
- HDsprintf(process_group_name, "/process_%d", max_group);
+ HDsnprintf(process_group_name, sizeof(process_group_name), "/process_%d", max_group);
proc_gid = H5Gcreate2(file_id, process_group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -4802,7 +4802,7 @@ cache_image_smoke_check_5(hbool_t single_file_vfd)
/* 11) Validate all the zoos. */
i = min_group;
while (pass && i <= max_group) {
- HDsprintf(process_group_name, "/process_%d", i);
+ HDsnprintf(process_group_name, sizeof(process_group_name), "/process_%d", i);
validate_zoo(file_id, process_group_name, i++);
}
@@ -4854,7 +4854,7 @@ cache_image_smoke_check_5(hbool_t single_file_vfd)
i = min_group;
while ((pass) && (i <= max_group)) {
- HDsprintf(process_group_name, "/process_%d", i);
+ HDsnprintf(process_group_name, sizeof(process_group_name), "/process_%d", i);
validate_zoo(file_id, process_group_name, i++);
}
@@ -4914,7 +4914,7 @@ cache_image_smoke_check_5(hbool_t single_file_vfd)
*/
i = min_group;
while ((pass) && (i <= max_group)) {
- HDsprintf(process_group_name, "/process_%d", i);
+ HDsnprintf(process_group_name, sizeof(process_group_name), "/process_%d", i);
validate_zoo(file_id, process_group_name, i++);
}
diff --git a/test/chunk_info.c b/test/chunk_info.c
index 148c577..2bec42d 100644
--- a/test/chunk_info.c
+++ b/test/chunk_info.c
@@ -421,7 +421,7 @@ verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsi
/* Ensure the correct chunk indexing scheme is used */
if (idx_type != exp_idx_type) {
char msg[256];
- HDsprintf(msg, "Should be using %s.\n", index_type_str(idx_type));
+ HDsnprintf(msg, sizeof(msg), "Should be using %s.\n", index_type_str(idx_type));
FAIL_PUTS_ERROR(msg);
}
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index 04a931c..024b6c0 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -1766,7 +1766,7 @@ test_pack_ooo(void)
/* Insert the compound members in the random order previously generated */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == sub_cmpd_order) {
if (H5Tinsert(cmpd, name, (size_t)(4 * order[i]), sub_cmpd) < 0)
PACK_OOO_ERROR
@@ -1799,7 +1799,7 @@ test_pack_ooo(void)
/* Insert the compound members in the random order previously generated */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == sub_cmpd_order) {
if (H5Tinsert(cmpd, name, (size_t)(4 * order[i]), sub_cmpd) < 0)
PACK_OOO_ERROR
@@ -1834,7 +1834,7 @@ test_pack_ooo(void)
/* Insert the compound members in reverse order, with compound last */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == PACK_NMEMBS - 1) {
if (H5Tinsert(cmpd, name, (size_t)(4 * (PACK_NMEMBS - i - 1)), sub_cmpd) < 0)
PACK_OOO_ERROR
@@ -1867,7 +1867,7 @@ test_pack_ooo(void)
/* Insert the compound members in reverse order, with compound last */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == PACK_NMEMBS - 1) {
if (H5Tinsert(cmpd, name, (size_t)(4 * (PACK_NMEMBS - i - 1)), sub_cmpd) < 0)
PACK_OOO_ERROR
@@ -1902,7 +1902,7 @@ test_pack_ooo(void)
/* Insert the compound members in forward order, with compound first */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == 0) {
if (H5Tinsert(cmpd, name, (size_t)(4 * i), sub_cmpd) < 0)
PACK_OOO_ERROR
@@ -1935,7 +1935,7 @@ test_pack_ooo(void)
/* Insert the compound members in forward order */
for (i = 0; i < PACK_NMEMBS; i++) {
- HDsprintf(name, "%05d", i);
+ HDsnprintf(name, sizeof(name), "%05d", i);
if (i == 0) {
if (H5Tinsert(cmpd, name, (size_t)(4 * i), sub_cmpd) < 0)
PACK_OOO_ERROR
diff --git a/test/del_many_dense_attrs.c b/test/del_many_dense_attrs.c
index cf7f607..78aba3b 100644
--- a/test/del_many_dense_attrs.c
+++ b/test/del_many_dense_attrs.c
@@ -119,7 +119,7 @@ main(void)
/* Create attributes in the group */
for (i = ATTR_COUNT; i >= 0; i--) {
/* Set up the attribute name */
- HDsprintf(aname, "%s%d", basename, i);
+ HDsnprintf(aname, sizeof(aname), "%s%d", basename, i);
/* Create the attribute */
if ((aid = H5Acreate2(gid, aname, tid, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -165,7 +165,7 @@ main(void)
/* Delete the attributes */
for (i = 0; i <= ATTR_COUNT; i++) {
/* Set up the attribute name */
- HDsprintf(aname, "%s%d", basename, i);
+ HDsnprintf(aname, sizeof(aname), "%s%d", basename, i);
/* Delete the attribute */
if (H5Adelete(gid, aname) < 0)
diff --git a/test/dsets.c b/test/dsets.c
index 88e3ce0..c24d746 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -8845,7 +8845,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, dapl1)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same as on fapl_local */
+ /* Retrieve dapl from dataset, verify cache values are the same as on fapl_local */
if ((dapl2 = H5Dget_access_plist(dsid)) < 0)
FAIL_STACK_ERROR
if (H5Pget_chunk_cache(dapl2, &nslots_4, &nbytes_4, &w0_4) < 0)
@@ -8869,7 +8869,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Oopen(fid, "dset", dapl1)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same as on dapl1 */
+ /* Retrieve dapl from dataset, verify cache values are the same as on dapl1 */
/* Note we rely on the knowledge that H5Pget_chunk_cache retrieves these
* values directly from the dataset structure, and not from a copy of the
* dapl used to open the dataset (which is not preserved).
@@ -8889,7 +8889,7 @@ test_chunk_cache(hid_t fapl)
if ((dsid = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR
- /* Retrieve dapl from dataset, verfiy cache values are the same on fapl_local */
+ /* Retrieve dapl from dataset, verify cache values are the same on fapl_local */
if ((dapl2 = H5Dget_access_plist(dsid)) < 0)
FAIL_STACK_ERROR
if (H5Pget_chunk_cache(dapl2, &nslots_4, &nbytes_4, &w0_4) < 0)
diff --git a/test/dt_arith.c b/test/dt_arith.c
index 3180123..993048a 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -79,7 +79,7 @@ typedef enum dtype_t {
* be allowed to continue (cf. Posix signals) so in order to recover from a
* SIGFPE we run tests that might generate one in a child process.
*/
-#ifdef H5_HAVE_UNISTD_H
+#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)
#define HANDLE_SIGFPE
#endif
diff --git a/test/dtransform.c b/test/dtransform.c
index 9f7e659..1f6d48b 100644
--- a/test/dtransform.c
+++ b/test/dtransform.c
@@ -122,9 +122,8 @@ const int transformData[ROWS][COLS] = {{36, 31, 25, 19, 13, 7, 1, 5, 11, 16, 22,
{ \
TYPE array[ROWS][COLS]; \
const char *f_to_c = "(5/9.0)*(x-32)"; \
- /* utrans is a transform for unsigned types: no negative numbers involved and results are < 255 to \
- * fit into uchar */ \
- const char *utrans = "((x+100)/4)*3"; \
+ /* utrans is a transform for char types: numbers are restricted from -128 to 127, fits into char */ \
+ const char *utrans = "(x/4+25)*3"; \
\
hid_t dataspace, dxpl_id_f_to_c, dxpl_id_utrans, dset, dset_nn, dt_nn; \
H5T_order_t order; \
@@ -211,9 +210,8 @@ const int transformData[ROWS][COLS] = {{36, 31, 25, 19, 13, 7, 1, 5, 11, 16, 22,
{ \
TYPE array[ROWS][COLS]; \
const char *f_to_c = "(5/9.0)*(x-32)"; \
- /* utrans is a transform for unsigned types: no negative numbers involved and results are < 255 to \
- * fit into uchar */ \
- const char *utrans = "((x+100)/4)*3"; \
+ /* utrans is a transform for char types: numbers are restricted from -128 to 127, fits into char */ \
+ const char *utrans = "(x/4+25)*3"; \
\
hid_t dataspace, dxpl_id_f_to_c, dxpl_id_utrans, cparms, memspace, dset_chunk, filespace; \
hsize_t dim[2] = {ROWS, COLS}; \
@@ -314,7 +312,7 @@ main(void)
const char *simple = "(4/2) * ( (2 + 4)/(5 - 2.5))"; /* this equals 4.8 */
const char *polynomial = "(2+x)* ((x-8)/2)";
/* inverses the utrans transform in init_test to get back original array */
- const char *utrans_inv = "(x/3)*4 - 100";
+ const char *utrans_inv = "(x/3 - 25)*4";
if ((file_id = H5Fcreate("dtransform.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
TEST_ERROR;
@@ -436,9 +434,8 @@ static int
init_test(hid_t file_id)
{
const char *f_to_c = "(5/9.0)*(x-32)";
- /* utrans is a transform for unsigned types: no negative numbers involved and results are < 255 to fit
- * into uchar */
- const char *utrans = "((x+100)/4)*3";
+ /* utrans is a transform for char types: numbers are restricted from -128 to 127, fits into char */
+ const char *utrans = "(x/4+25)*3";
hid_t dataspace = -1;
hid_t dxpl_id_f_to_c = -1;
diff --git a/test/dtypes.c b/test/dtypes.c
index 5a4f2d6..d2064b7 100644
--- a/test/dtypes.c
+++ b/test/dtypes.c
@@ -2635,7 +2635,7 @@ test_compound_13(void)
HDmemset(&data_out, 0, sizeof(data_out));
for (u = 0; u < COMPOUND13_ARRAY_SIZE + 1; u++)
data_out.x[u] = (unsigned char)u;
- data_out.y = 99.99f;
+ data_out.y = 99.99F;
/* Set latest_format in access property list to enable the latest
* compound datatype format.
@@ -4969,7 +4969,7 @@ test_conv_str_2(void)
} /* end for */
/* Do the conversions */
- HDsprintf(s, "Testing random string conversion speed");
+ HDsnprintf(s, sizeof(s), "Testing random string conversion speed");
HDprintf("%-70s", s);
HDfflush(stdout);
if (H5Tconvert(c_type, f_type, nelmts, buf, NULL, H5P_DEFAULT) < 0)
@@ -5184,14 +5184,14 @@ test_conv_enum_1(void)
buf[u] = HDrand() % 26;
/* Conversions */
- HDsprintf(s, "Testing random enum conversion O(N)");
+ HDsnprintf(s, sizeof(s), "Testing random enum conversion O(N)");
HDprintf("%-70s", s);
HDfflush(stdout);
if (H5Tconvert(t1, t2, nelmts, buf, NULL, H5P_DEFAULT) < 0)
goto error;
PASSED();
- HDsprintf(s, "Testing random enum conversion O(N log N)");
+ HDsnprintf(s, sizeof(s), "Testing random enum conversion O(N log N)");
HDprintf("%-70s", s);
HDfflush(stdout);
if (H5Tconvert(t2, t1, nelmts, buf, NULL, H5P_DEFAULT) < 0)
diff --git a/test/earray.c b/test/earray.c
index 4763d51..7c8dd1f 100644
--- a/test/earray.c
+++ b/test/earray.c
@@ -2543,15 +2543,16 @@ main(void)
/* Test first element in data block */
nelmts = (hsize_t)((hsize_t)1 + cparam.idx_blk_elmts + tparam.sblk_info[sblk].start_idx +
(tparam.sblk_info[sblk].dblk_nelmts * dblk));
- HDsprintf(test_str, "setting first element of array's data block #%llu",
- (unsigned long long)ndblks);
+ HDsnprintf(test_str, sizeof(test_str),
+ "setting first element of array's data block #%llu",
+ (unsigned long long)ndblks);
nerrors += test_set_elmts(fapl, &cparam, &tparam, nelmts, test_str);
/* Test all elements in data block */
nelmts = (hsize_t)(cparam.idx_blk_elmts + tparam.sblk_info[sblk].start_idx +
(tparam.sblk_info[sblk].dblk_nelmts * (dblk + 1)));
- HDsprintf(test_str, "setting all elements of array's data block #%llu",
- (unsigned long long)ndblks);
+ HDsnprintf(test_str, sizeof(test_str), "setting all elements of array's data block #%llu",
+ (unsigned long long)ndblks);
nerrors += test_set_elmts(fapl, &cparam, &tparam, nelmts, test_str);
/* Increment data block being tested */
diff --git a/test/enc_dec_plist.c b/test/enc_dec_plist.c
index 60b229a..0a73273 100644
--- a/test/enc_dec_plist.c
+++ b/test/enc_dec_plist.c
@@ -199,8 +199,8 @@ main(void)
/* Display testing info */
low_string = h5_get_version_string(low);
high_string = h5_get_version_string(high);
- HDsprintf(msg, "Testing ENCODE/DECODE with file version bounds: (%s, %s):", low_string,
- high_string);
+ HDsnprintf(msg, sizeof(msg),
+ "Testing ENCODE/DECODE with file version bounds: (%s, %s):", low_string, high_string);
HDputs(msg);
if (VERBOSE_MED)
diff --git a/test/event_set.c b/test/event_set.c
index 5df49e9..c2a17b1 100644
--- a/test/event_set.c
+++ b/test/event_set.c
@@ -19,8 +19,157 @@
#include "h5test.h"
#include "H5srcdir.h"
+#define EVENT_SET_NUM_CONNECTOR_IDS 2
+
const char *FILENAME[] = {"event_set_1", NULL};
+hid_t connector_ids_g[EVENT_SET_NUM_CONNECTOR_IDS];
+
+herr_t fake_wait_request_wait(void *req, uint64_t timeout, H5VL_request_status_t *status);
+herr_t fake_wait_request_free(void *req);
+
+/* A VOL class struct that describes a VOL class with no
+ * functionality, other than a wait that returns success.
+ */
+static const H5VL_class_t fake_wait_vol_g = {
+ H5VL_VERSION, /* VOL class struct version */
+ ((H5VL_class_value_t)501), /* value */
+ "fake_wait", /* name */
+ 0, /* connector version */
+ 0, /* capability flags */
+ NULL, /* initialize */
+ NULL, /* terminate */
+ {
+ /* info_cls */
+ (size_t)0, /* size */
+ NULL, /* copy */
+ NULL, /* compare */
+ NULL, /* free */
+ NULL, /* to_str */
+ NULL, /* from_str */
+ },
+ {
+ /* wrap_cls */
+ NULL, /* get_object */
+ NULL, /* get_wrap_ctx */
+ NULL, /* wrap_object */
+ NULL, /* unwrap_object */
+ NULL, /* free_wrap_ctx */
+ },
+ {
+ /* attribute_cls */
+ NULL, /* create */
+ NULL, /* open */
+ NULL, /* read */
+ NULL, /* write */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL, /* optional */
+ NULL /* close */
+ },
+ {
+ /* dataset_cls */
+ NULL, /* create */
+ NULL, /* open */
+ NULL, /* read */
+ NULL, /* write */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL, /* optional */
+ NULL /* close */
+ },
+ {
+ /* datatype_cls */
+ NULL, /* commit */
+ NULL, /* open */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL, /* optional */
+ NULL /* close */
+ },
+ {
+ /* file_cls */
+ NULL, /* create */
+ NULL, /* open */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL, /* optional */
+ NULL /* close */
+ },
+ {
+ /* group_cls */
+ NULL, /* create */
+ NULL, /* open */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL, /* optional */
+ NULL /* close */
+ },
+ {
+ /* link_cls */
+ NULL, /* create */
+ NULL, /* copy */
+ NULL, /* move */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL /* optional */
+ },
+ {
+ /* object_cls */
+ NULL, /* open */
+ NULL, /* copy */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL /* optional */
+ },
+ {
+ /* introspect_cls */
+ NULL, /* get_conn_cls */
+ NULL, /* get_cap_flags */
+ NULL, /* opt_query */
+ },
+ {
+ /* request_cls */
+ fake_wait_request_wait, /* wait */
+ NULL, /* notify */
+ NULL, /* cancel */
+ NULL, /* specific */
+ NULL, /* optional */
+ fake_wait_request_free /* free */
+ },
+ {
+ /* blob_cls */
+ NULL, /* put */
+ NULL, /* get */
+ NULL, /* specific */
+ NULL /* optional */
+ },
+ {
+ /* token_cls */
+ NULL, /* cmp */
+ NULL, /* to_str */
+ NULL /* from_str */
+ },
+ NULL /* optional */
+};
+
+herr_t
+fake_wait_request_wait(void H5_ATTR_UNUSED *req, uint64_t H5_ATTR_UNUSED timeout,
+ H5VL_request_status_t *status)
+{
+ /* Set status if requested */
+ if (status)
+ *status = H5VL_REQUEST_STATUS_SUCCEED;
+
+ return 0;
+} /* end fake_wait_request_wait() */
+
+herr_t
+fake_wait_request_free(void H5_ATTR_UNUSED *req)
+{
+ return 0;
+} /* end fake_wait_request_free() */
+
/*-------------------------------------------------------------------------
* Function: test_es_create
*
@@ -159,6 +308,348 @@ error:
}
/*-------------------------------------------------------------------------
+ * Function: test_es_get_requests
+ *
+ * Purpose: Tests getting requests from event set.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Neil Fortner
+ * Wednesday, November 24, 2021
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_es_get_requests(void)
+{
+ hid_t es_id; /* Event set ID */
+ hid_t connector_ids[2]; /* Connector IDs */
+ void * requests[2]; /* Requests */
+ int req_targets[2]; /* Dummy targets for void * requests */
+ size_t count; /* # of events in set */
+ hbool_t op_failed; /* Whether an operation failed (unused) */
+
+ TESTING("event set get requests");
+
+ /* Create an event set */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR
+
+ /* Get number of requests in event set */
+ count = 3;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, NULL, NULL, 0, &count) < 0)
+ TEST_ERROR
+ if (count != 0)
+ TEST_ERROR
+
+ /* Get only connector IDs */
+ count = 3;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, connector_ids, NULL, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 0)
+ TEST_ERROR
+ if (connector_ids[0] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+
+ /* Get only requests */
+ count = 3;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, NULL, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 0)
+ TEST_ERROR
+ if (requests[0] != NULL)
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Get both */
+ count = 3;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, connector_ids, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 0)
+ TEST_ERROR
+ if (connector_ids[0] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (requests[0] != NULL)
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Insert event into event set */
+ if (H5ESinsert_request(es_id, connector_ids_g[0], &req_targets[0]) < 0)
+ TEST_ERROR
+
+ /* Get number of requests in event set */
+ count = 0;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, NULL, NULL, 0, &count) < 0)
+ TEST_ERROR
+ if (count != 1)
+ TEST_ERROR
+
+ /* Get only connector IDs */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, connector_ids, NULL, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 1)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+
+ /* Get only requests */
+ count = 0;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, NULL, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 1)
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Get both */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, connector_ids, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 1)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Insert second event into event set */
+ if (H5ESinsert_request(es_id, connector_ids_g[1], &req_targets[1]) < 0)
+ TEST_ERROR
+
+ /* Get number of requests in event set */
+ count = 0;
+ if (H5ESget_requests(es_id, H5_ITER_NATIVE, NULL, NULL, 0, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+
+ /* Get only connector IDs */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_INC, connector_ids, NULL, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != connector_ids_g[1])
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, connector_ids, NULL, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[1])
+ TEST_ERROR
+ if (connector_ids[1] != connector_ids_g[0])
+ TEST_ERROR
+
+ /* Get only requests */
+ count = 0;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_INC, NULL, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != &req_targets[1])
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, NULL, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (requests[0] != &req_targets[1])
+ TEST_ERROR
+ if (requests[1] != &req_targets[0])
+ TEST_ERROR
+
+ /* Get both */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_INC, connector_ids, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != connector_ids_g[1])
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != &req_targets[1])
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, connector_ids, requests, 2, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[1])
+ TEST_ERROR
+ if (connector_ids[1] != connector_ids_g[0])
+ TEST_ERROR
+ if (requests[0] != &req_targets[1])
+ TEST_ERROR
+ if (requests[1] != &req_targets[0])
+ TEST_ERROR
+
+ /* Get only first connector ID */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_INC, connector_ids, NULL, 1, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, connector_ids, NULL, 1, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[1])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+
+ /* Get only first request */
+ count = 0;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_INC, NULL, requests, 1, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, NULL, requests, 1, &count) < 0)
+ TEST_ERROR
+ if (count != 2)
+ TEST_ERROR
+ if (requests[0] != &req_targets[1])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Get only first of both */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_INC, connector_ids, requests, 1, &count) < 0)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[0])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (requests[0] != &req_targets[0])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Try with H5_ITER_DEC */
+ count = 0;
+ connector_ids[0] = H5I_INVALID_HID;
+ connector_ids[1] = H5I_INVALID_HID;
+ requests[0] = NULL;
+ requests[1] = NULL;
+ if (H5ESget_requests(es_id, H5_ITER_DEC, connector_ids, requests, 1, &count) < 0)
+ TEST_ERROR
+ if (connector_ids[0] != connector_ids_g[1])
+ TEST_ERROR
+ if (connector_ids[1] != H5I_INVALID_HID)
+ TEST_ERROR
+ if (requests[0] != &req_targets[1])
+ TEST_ERROR
+ if (requests[1] != NULL)
+ TEST_ERROR
+
+ /* Close the event set */
+ if (H5ESwait(es_id, 10000000, &count, &op_failed) < 0)
+ TEST_ERROR
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+ return 1;
+}
+
+/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Tests event sets
@@ -175,27 +666,41 @@ int
main(void)
{
hid_t fapl_id = H5I_INVALID_HID; /* File access property list */
+ int i; /* Local index variable */
int nerrors = 0; /* Error count */
/* Setup */
h5_reset();
fapl_id = h5_fileaccess();
+ /* Register dummy connector IDs */
+ for (i = 0; i < EVENT_SET_NUM_CONNECTOR_IDS; i++)
+ if ((connector_ids_g[i] = H5VLregister_connector(&fake_wait_vol_g, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
/* Tests */
nerrors += test_es_create();
nerrors += test_es_none();
+ nerrors += test_es_get_requests();
+
+ /* Unregister dummy connectors */
+ for (i = 0; i < EVENT_SET_NUM_CONNECTOR_IDS; i++)
+ if (H5VLunregister_connector(connector_ids_g[i]) < 0)
+ TEST_ERROR
/* Cleanup */
h5_cleanup(FILENAME, fapl_id);
/* Check for any errors */
- if (nerrors) {
- HDputs("***** EVENT SET TESTS FAILED *****");
- HDexit(EXIT_FAILURE);
- } /* end if */
+ if (nerrors)
+ goto error;
/* Report status */
HDputs("All event set tests passed.");
HDexit(EXIT_SUCCESS);
+
+error:
+ HDputs("***** EVENT SET TESTS FAILED *****");
+ HDexit(EXIT_FAILURE);
} /* end main() */
diff --git a/test/external.c b/test/external.c
index 281593c..bb86cde 100644
--- a/test/external.c
+++ b/test/external.c
@@ -166,8 +166,7 @@ test_non_extendible(hid_t file)
if (file_size != (max_size[0] * sizeof(int))) {
H5_FAILED();
HDputs(" Wrong file size.");
- HDprintf(" got: %lu\n ans: %lu\n", (unsigned long)file_size,
- (unsigned long)max_size[0] * sizeof(int));
+ HDprintf(" got: %" PRIuHSIZE "\n ans: %" PRIuHSIZE "\n", file_size, max_size[0] * sizeof(int));
goto error;
}
@@ -890,8 +889,8 @@ test_write_file_set(hid_t fapl)
for (i = 0; i < N_EXT_FILES; i++) {
char name1[64], name2[64];
- HDsprintf(name1, "extern_%dr.raw", i + 1);
- HDsprintf(name2, "extern_%dw.raw", i + 1);
+ HDsnprintf(name1, sizeof(name1), "extern_%dr.raw", i + 1);
+ HDsnprintf(name2, sizeof(name2), "extern_%dw.raw", i + 1);
if (!files_have_same_contents(name1, name2))
FAIL_PUTS_ERROR(" Output differs from expected value.")
} /* end for */
@@ -986,11 +985,11 @@ test_path_absolute(hid_t fapl)
if (NULL == HDgetcwd(cwdpath, sizeof(cwdpath)))
TEST_ERROR
for (i = 0; i < N_EXT_FILES; i++) {
- HDsnprintf(filename, sizeof(filename), "%s%sextern_%dr.raw", cwdpath, H5_DIR_SEPS, (int)i + 1);
+ HDsnprintf(filename, sizeof(filename), "%s%sextern_%zur.raw", cwdpath, H5_DIR_SEPS, i + 1);
#if defined(H5_HAVE_WINDOW_PATH)
/* For windows, test path-absolute case (\dir\file.raw) for the second file */
if (i == 1)
- HDsnprintf(filename, sizeof(filename), "%s%sextern_%dr.raw", cwdpath + 2, H5_DIR_SEPS, i + 1);
+ HDsnprintf(filename, sizeof(filename), "%s%sextern_%zur.raw", cwdpath + 2, H5_DIR_SEPS, i + 1);
#endif
if (H5Pset_external(dcpl, filename, (off_t)(i * GARBAGE_PER_FILE), (hsize_t)sizeof(part)) < 0)
FAIL_STACK_ERROR
diff --git a/test/external_common.c b/test/external_common.c
index c37fd16..3f04260 100644
--- a/test/external_common.c
+++ b/test/external_common.c
@@ -59,9 +59,9 @@ reset_raw_data_files(hbool_t is_env)
/* Open file */
if (is_env)
- HDsprintf(filename, "extern_env_%lur.raw", (unsigned long)i + 1);
+ HDsnprintf(filename, sizeof(filename), "extern_env_%lur.raw", (unsigned long)i + 1);
else
- HDsprintf(filename, "extern_%lur.raw", (unsigned long)i + 1);
+ HDsnprintf(filename, sizeof(filename), "extern_%lur.raw", (unsigned long)i + 1);
if ((fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW)) < 0)
goto error;
@@ -96,9 +96,9 @@ reset_raw_data_files(hbool_t is_env)
/* Open file */
if (is_env)
- HDsprintf(filename, "extern_env_%luw.raw", (unsigned long)i + 1);
+ HDsnprintf(filename, sizeof(filename), "extern_env_%luw.raw", (unsigned long)i + 1);
else
- HDsprintf(filename, "extern_%luw.raw", (unsigned long)i + 1);
+ HDsnprintf(filename, sizeof(filename), "extern_%luw.raw", (unsigned long)i + 1);
if ((fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW)) < 0)
goto error;
diff --git a/test/fheap.c b/test/fheap.c
index 5ded20f..b897063 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -466,40 +466,24 @@ error:
return (1);
} /* add_obj() */
-/*-------------------------------------------------------------------------
- * Function: get_del_string
- *
- * Purpose: Return string describing the kind of deletion to perform
- *
- * Return: Success: 0
- *
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Monday, June 6, 2006
- *
- *-------------------------------------------------------------------------
- */
-static char *
+/* Return a string describing the kind of deletion to perform. */
+static const char *
get_del_string(const fheap_test_param_t *tparam)
{
- char *str;
-
/* Remove half of total objects from heap */
if (tparam->del_dir == FHEAP_DEL_FORWARD)
if (tparam->drain_half == FHEAP_DEL_DRAIN_ALL)
- str = H5MM_strdup("(all - forward)");
+ return "(all - forward)";
else
- str = H5MM_strdup("(half, refill, all - forward)");
+ return "(half, refill, all - forward)";
else if (tparam->del_dir == FHEAP_DEL_REVERSE)
if (tparam->drain_half == FHEAP_DEL_DRAIN_ALL)
- str = H5MM_strdup("(all - reverse)");
+ return "(all - reverse)";
else
- str = H5MM_strdup("(half, refill, all - reverse)");
+ return "(half, refill, all - reverse)";
else
- str = H5MM_strdup("(all - deleting heap)");
+ return "(all - deleting heap)";
- return (str);
} /* get_del_string() */
/*-------------------------------------------------------------------------
@@ -547,28 +531,20 @@ get_fill_size(const fheap_test_param_t *tparam)
*
*-------------------------------------------------------------------------
*/
-/* Disable warning for "format not a string literal" here -QAK */
-/*
- * This pragma only needs to surround the snprintf() calls with
- * test_desc in the code below, but early (4.4.7, at least) gcc only
- * allows diagnostic pragmas to be toggled outside of functions.
- */
-H5_GCC_CLANG_DIAG_OFF("format-nonliteral")
static int
begin_test(fheap_test_param_t *tparam, const char *base_desc, fheap_heap_ids_t *keep_ids, size_t *fill_size)
{
- char *del_str = NULL; /* Deletion order description */
- char *test_desc = NULL; /* Test description */
+ char * test_desc; /* Test description */
+ const char *del_str = get_del_string(tparam);
/*
* Test filling & removing all (small) objects from root direct block of absolute heap
*/
- del_str = get_del_string(tparam);
- HDassert(del_str);
- test_desc = (char *)H5MM_malloc(HDstrlen(del_str) + HDstrlen(base_desc));
- HDsprintf(test_desc, base_desc, del_str);
+    size_t test_desc_len = HDstrlen(base_desc) + sizeof(" ") + HDstrlen(del_str);
+    test_desc            = (char *)H5MM_malloc(test_desc_len);
+    (void)HDsnprintf(test_desc, test_desc_len, "%s %s", base_desc, del_str);
+
TESTING(test_desc);
- H5MM_xfree(del_str);
H5MM_xfree(test_desc);
/* Initialize the heap ID structure */
@@ -581,7 +557,6 @@ begin_test(fheap_test_param_t *tparam, const char *base_desc, fheap_heap_ids_t *
/* Success */
return (0);
} /* end begin_test() */
-H5_GCC_CLANG_DIAG_ON("format-nonliteral")
/*-------------------------------------------------------------------------
* Function: reopen_file
@@ -7769,7 +7744,7 @@ test_man_remove_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from root direct block of absolute heap %s";
+ const char *base_desc = "removing all objects from root direct block of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -7842,7 +7817,7 @@ test_man_remove_two_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from two direct blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from two direct blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -7930,7 +7905,7 @@ test_man_remove_first_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from first row of direct blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from first row of direct blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8000,7 +7975,7 @@ test_man_remove_first_two_rows(hid_t fapl, H5HF_create_t *cparam, fheap_test_par
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from first two rows of direct blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from first two rows of direct blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8072,7 +8047,7 @@ test_man_remove_first_four_rows(hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from first four rows of direct blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from first four rows of direct blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8148,7 +8123,7 @@ test_man_remove_all_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from all direct blocks of root group in absolute heap %s";
+ const char *base_desc = "removing all objects from all direct blocks of root group in absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8218,7 +8193,7 @@ test_man_remove_2nd_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from 2nd level indirect blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from 2nd level indirect blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8292,7 +8267,7 @@ test_man_remove_3rd_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
size_t fill_size; /* Size of objects for "bulk" filled blocks */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "removing all objects from 3rd level indirect blocks of absolute heap %s";
+ const char *base_desc = "removing all objects from 3rd level indirect blocks of absolute heap";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8373,8 +8348,7 @@ test_man_skip_start_block(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
size_t obj_size; /* Size of object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc =
- "inserting object that is too large for starting block, then remove all objects %s";
+ const char *base_desc = "inserting object that is too large for starting block, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -8453,7 +8427,7 @@ test_man_skip_start_block_add_back(hid_t fapl, H5HF_create_t *cparam, fheap_test
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
const char *base_desc =
- "skipping starting block, then adding object back to first block, then remove all objects %s";
+ "skipping starting block, then adding object back to first block, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -8553,7 +8527,7 @@ test_man_skip_start_block_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_t
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
const char *base_desc =
- "skipping starting block, then adding objects to backfill and extend, then remove all objects %s";
+ "skipping starting block, then adding objects to backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8662,7 +8636,7 @@ test_man_skip_2nd_block(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *t
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
const char *base_desc = "insert object to initial block, then add object too large for starting direct "
- "blocks, then remove all objects %s";
+ "blocks, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -8761,7 +8735,7 @@ test_man_skip_2nd_block_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_tes
unsigned v; /* Local index variables */
/* Test description */
const char *base_desc = "insert object to initial block, then add object too large for starting direct "
- "blocks, then backfill and extend, then remove all objects %s";
+ "blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -8907,7 +8881,7 @@ test_man_fill_one_partial_skip_2nd_block_add_skipped(hid_t fapl, H5HF_create_t *
unsigned u; /* Local index variable */
/* Test description */
const char *base_desc =
- "skipping blocks with indirect root, then backfill and extend, then remove all objects %s";
+ "skipping blocks with indirect root, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9072,7 +9046,7 @@ test_man_fill_row_skip_add_skipped(hid_t fapl, H5HF_create_t *cparam, fheap_test
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
const char *base_desc =
- "filling first row, then skipping rows, then backfill and extend, then remove all objects %s";
+ "filling first row, then skipping rows, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9202,7 +9176,7 @@ test_man_skip_direct_skip_indirect_two_rows_add_skipped(hid_t fapl, H5HF_create_
unsigned v; /* Local index variables */
/* Test description */
const char *base_desc = "skipping direct blocks to last row and skipping two rows of root indirect "
- "block, then backfill and extend, then remove all objects %s";
+ "block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -9328,7 +9302,7 @@ test_man_fill_direct_skip_indirect_start_block_add_skipped(hid_t fapl, H5HF_crea
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
const char *base_desc = "filling direct blocks and skipping blocks in non-root indirect block, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9457,7 +9431,7 @@ test_man_fill_direct_skip_2nd_indirect_start_block_add_skipped(hid_t fapl, H5HF_
unsigned u; /* Local index variable */
/* Test description */
const char *base_desc = "filling direct blocks and skipping row of non-root indirect blocks, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9589,7 +9563,7 @@ test_man_fill_2nd_direct_less_one_wrap_start_block_add_skipped(hid_t fapl, H5HF_
/* Test description */
const char *base_desc =
"filling direct blocks, filling 2nd level indirect blocks, except last one, and insert object too "
- "large for 2nd level indirect blocks, then backfill and extend, then remove all objects %s";
+ "large for 2nd level indirect blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9735,7 +9709,7 @@ test_man_fill_direct_skip_2nd_indirect_skip_2nd_block_add_skipped(hid_t fapl, H5
unsigned u; /* Local index variable */
/* Test description */
const char *base_desc = "filling direct blocks and skipping row of non-root indirect blocks, then skip "
- "row of direct blocks, then backfill and extend, then remove all objects %s";
+ "row of direct blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -9898,7 +9872,7 @@ test_man_fill_direct_skip_indirect_two_rows_add_skipped(hid_t fapl, H5HF_create_
unsigned u, v; /* Local index variables */
/* Test description */
const char *base_desc = "filling direct blocks and skipping two rows of root indirect block, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10055,7 +10029,7 @@ test_man_fill_direct_skip_indirect_two_rows_skip_indirect_row_add_skipped(hid_t
/* Test description */
const char *base_desc =
"filling direct blocks and skipping two rows of root indirect block, skip one row of root indirect "
- "block, then backfill and extend, then remove all objects %s";
+ "block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10235,7 +10209,7 @@ test_man_fill_2nd_direct_skip_start_block_add_skipped(hid_t fapl, H5HF_create_t
/* Test description */
const char *base_desc =
"filling direct blocks, filling 2nd level indirect blocks, and skip first rows of direct blocks of "
- "3rd level indirect block, then backfill and extend, then remove all objects %s";
+ "3rd level indirect block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10367,7 +10341,7 @@ test_man_fill_2nd_direct_skip_2nd_indirect_start_block_add_skipped(hid_t fapl, H
const char *base_desc =
"filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect block's direct "
"blocks, and skip first rows of direct blocks of 3rd level indirect block's 2nd level indirect "
- "block, then backfill and extend, then remove all objects %s";
+ "block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10511,7 +10485,7 @@ test_man_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(h
/* Test description */
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level "
"indirect block's direct blocks, and skip first row of indirect blocks of 3rd "
- "level indirect block, then backfill and extend, then remove all objects %s";
+ "level indirect block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10664,7 +10638,7 @@ test_man_fill_2nd_direct_fill_direct_skip2_3rd_indirect_start_block_add_skipped(
/* Test description */
const char *base_desc = "filling direct blocks, filling 2nd level indirect blocks, filling 3rd level "
"indirect block's direct blocks, and skip first two rows of indirect blocks of "
- "3rd level indirect block, then backfill and extend, then remove all objects %s";
+ "3rd level indirect block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10822,7 +10796,7 @@ test_man_fill_3rd_direct_less_one_fill_direct_wrap_start_block_add_skipped(hid_t
const char *base_desc =
"filling direct blocks, filling 2nd level indirect blocks, filling first row of 3rd level indirect "
"blocks, except last one, fill all direct blocks in last 3rd level indirect block, and insert object "
- "too large for it's 2nd level indirect blocks, then backfill and extend, then remove all objects %s";
+ "too large for it's 2nd level indirect blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -10986,7 +10960,7 @@ test_man_fill_1st_row_3rd_direct_fill_2nd_direct_less_one_wrap_start_block_add_s
"filling direct blocks, filling 2nd level indirect blocks, filling first row of 3rd level indirect "
"blocks, fill all direct blocks in next 3rd level indirect block, fill all 1st row of 2nd level "
"indirect blocks, except last one, and insert object too large for 2nd level indirect block, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -11154,7 +11128,7 @@ test_man_fill_3rd_direct_fill_direct_skip_start_block_add_skipped(hid_t fapl, H5
const char *base_desc =
"filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill "
"4th level indirect block's direct blocks, and skip first row of 2nd indirect blocks of 4th level "
- "indirect block, then backfill and extend, then remove all objects %s";
+ "indirect block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -11318,7 +11292,7 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_blo
"filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill "
"4th level indirect block's direct, 2nd level indirect blocks and 3rd level direct block, and skip "
"first row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -11502,7 +11476,7 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_two_rows_
"first row of 4th level indirect blocks, fill 2nd row 4th level indirect block's direct, 2nd level "
"indirect blocks, first row of 3rd level indirect blocks, 3rd level direct block in 2nd row, and "
"skip first row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect block, then "
- "backfill and extend, then remove all objects %s";
+ "backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -11721,7 +11695,7 @@ test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_star
"filling direct blocks, filling 2nd level indirect blocks, filling 3rd level indirect blocks, fill "
"first row of 3rd level indirect blocks in 4th level indirect block except last 3rd level block, "
"fill direct blocks in 3rd level block, and skip row of 2nd indirect blocks of 4th level indirect "
- "block's 3rd level indirect block, then backfill and extend, then remove all objects %s";
+ "block's 3rd level indirect block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -11922,7 +11896,7 @@ test_man_fill_4th_direct_less_one_fill_2nd_direct_fill_direct_skip_3rd_indirect_
"first row of 4th level indirect blocks, except last one, fill first row of 3rd level indirect "
"blocks in last 4th level indirect block except last 3rd level block, fill direct blocks in 3rd "
"level block, and skip row of 2nd indirect blocks of 4th level indirect block's 3rd level indirect "
- "block, then backfill and extend, then remove all objects %s";
+ "block, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -12143,7 +12117,7 @@ test_man_frag_simple(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
unsigned u; /* Local index variables */
/* Test description */
- const char *base_desc = "fragmenting small blocks, then backfill and extend, then remove all objects %s";
+ const char *base_desc = "fragmenting small blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -12275,7 +12249,7 @@ test_man_frag_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
fheap_heap_state_t state; /* State of fractal heap */
unsigned u, v; /* Local index variables */
/* Test description */
- const char *base_desc = "fragmenting direct blocks, then backfill and extend, then remove all objects %s";
+ const char *base_desc = "fragmenting direct blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -12449,7 +12423,7 @@ test_man_frag_2nd_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
unsigned u, v; /* Local index variables */
/* Test description */
const char *base_desc = "fill root direct blocks, then fragment 2nd level indirect block's direct "
- "blocks, then backfill and extend, then remove all objects %s";
+ "blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -12561,7 +12535,7 @@ test_man_frag_3rd_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
/* Test description */
const char *base_desc =
"fill root direct blocks and 2nd level indirect blocks, then fragment 3rd level indirect block's "
- "direct blocks, then backfill and extend, then remove all objects %s";
+ "direct blocks, then backfill and extend, then remove all objects";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, &fill_size) < 0)
@@ -12675,7 +12649,7 @@ test_huge_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert one huge object, then remove %s";
+ const char *base_desc = "insert one huge object, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -12826,7 +12800,7 @@ test_huge_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert two huge objects, then remove %s";
+ const char *base_desc = "insert two huge objects, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -13057,7 +13031,7 @@ test_huge_insert_three(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert three huge objects, then remove %s";
+ const char *base_desc = "insert three huge objects, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -13364,7 +13338,7 @@ test_huge_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert mix of normal & huge objects, then remove %s";
+ const char *base_desc = "insert mix of normal & huge objects, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -13788,7 +13762,7 @@ test_filtered_huge(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam
hbool_t huge_ids_direct; /* Are 'huge' objects directly accessed? */
hbool_t pline_init = FALSE; /* Whether the I/O pipeline has been initialized */
/* Test description */
- const char *base_desc = "insert 'huge' object into heap with I/O filters, then remove %s";
+ const char *base_desc = "insert 'huge' object into heap with I/O filters, then remove";
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
@@ -13996,7 +13970,7 @@ test_tiny_insert_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert one tiny object, then remove %s";
+ const char *base_desc = "insert one tiny object, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -14147,7 +14121,7 @@ test_tiny_insert_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert two tiny objects, then remove %s";
+ const char *base_desc = "insert two tiny objects, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -14383,7 +14357,7 @@ test_tiny_insert_mix(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpar
unsigned char obj_type; /* Type of storage for object */
fheap_heap_state_t state; /* State of fractal heap */
/* Test description */
- const char *base_desc = "insert mix of normal, huge & tiny objects, then remove %s";
+ const char *base_desc = "insert mix of normal, huge & tiny objects, then remove";
/* Perform common test initialization operations */
if (begin_test(tparam, base_desc, &keep_ids, NULL) < 0)
@@ -14984,7 +14958,7 @@ test_filtered_man_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_para
fheap_heap_state_t state; /* State of fractal heap */
unsigned deflate_level; /* Deflation level */
/* Test description */
- const char *base_desc = "insert one 'managed' object into heap with I/O filters, then remove %s";
+ const char *base_desc = "insert one 'managed' object into heap with I/O filters, then remove";
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
@@ -15158,7 +15132,7 @@ test_filtered_man_root_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_pa
fheap_heap_state_t state; /* State of fractal heap */
unsigned deflate_level; /* Deflation level */
/* Test description */
- const char *base_desc = "insert two 'managed' objects into heap with I/O filters, then remove %s";
+ const char *base_desc = "insert two 'managed' objects into heap with I/O filters, then remove";
/* Copy heap creation properties */
HDmemcpy(&tmp_cparam, cparam, sizeof(H5HF_create_t));
diff --git a/test/filter_plugin.c b/test/filter_plugin.c
index c373b3b..dd61a43 100644
--- a/test/filter_plugin.c
+++ b/test/filter_plugin.c
@@ -968,16 +968,6 @@ test_path_api_calls(void)
*/
n_starting_paths = 42;
- /* Check that initialization is correct */
- TESTING(" initialize");
-
- if (H5PLsize(&n_paths) < 0)
- TEST_ERROR;
- if (n_paths != 2)
- TEST_ERROR;
-
- PASSED();
-
/****************/
/* H5PLremove() */
/****************/
diff --git a/test/flush1.c b/test/flush1.c
index f5c4e2b..0ddceb6 100644
--- a/test/flush1.c
+++ b/test/flush1.c
@@ -81,7 +81,7 @@ create_file(const char *filename, hid_t fapl_id, hbool_t swmr)
if ((top_gid = H5Gcreate2(fid, "top_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
STACK_ERROR
for (i = 0; i < NGROUPS; i++) {
- HDsprintf(group_name, "group%02d", i);
+ HDsnprintf(group_name, sizeof(group_name), "group%02d", i);
if ((gid = H5Gcreate2(top_gid, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
STACK_ERROR
if (H5Gclose(gid) < 0)
diff --git a/test/flush2.c b/test/flush2.c
index bf1187b..b6c7761 100644
--- a/test/flush2.c
+++ b/test/flush2.c
@@ -144,7 +144,7 @@ file_ok(const char *filename, hid_t fapl_id, hbool_t check_second_dset)
if ((top_gid = H5Gopen2(fid, "top_group", H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < NGROUPS; i++) {
- HDsprintf(group_name, "group%02d", i);
+ HDsnprintf(group_name, sizeof(group_name), "group%02d", i);
if ((gid = H5Gopen2(top_gid, group_name, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
diff --git a/test/flushrefresh.c b/test/flushrefresh.c
index 2757bbf..0c10238 100644
--- a/test/flushrefresh.c
+++ b/test/flushrefresh.c
@@ -93,7 +93,7 @@ FILE *errorfile;
/* ===================== */
/* Main */
-int main(int argc, const char *argv[]);
+int main(int argc, char *argv[]);
/* Flush Test Framework */
herr_t test_flush(void);
@@ -137,7 +137,7 @@ herr_t end_verification(void);
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
/* Variables */
const char *envval = NULL;
diff --git a/test/genall5.c b/test/genall5.c
index f3b0e6a..ca96eeb 100644
--- a/test/genall5.c
+++ b/test/genall5.c
@@ -306,7 +306,7 @@ ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks)
while ((pass) && (u < nlinks)) {
char linkname[16];
- HDsprintf(linkname, "%u", u);
+ HDsnprintf(linkname, sizeof(linkname), "%u", u);
if (0 == (u % 3)) {
ret = H5Lcreate_soft(group_name, gid, linkname, H5P_DEFAULT, H5P_DEFAULT);
@@ -474,7 +474,7 @@ vrfy_ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks)
char linkname[16];
htri_t link_exists;
- HDsprintf(linkname, "%u", u);
+ HDsnprintf(linkname, sizeof(linkname), "%u", u);
link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
if (link_exists < 0) {
@@ -715,7 +715,7 @@ ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks)
while ((pass) && (u < nlinks)) {
char linkname[16];
- HDsprintf(linkname, "%u", u);
+ HDsnprintf(linkname, sizeof(linkname), "%u", u);
if (0 == (u % 3)) {
ret = H5Lcreate_soft(group_name, gid, linkname, H5P_DEFAULT, H5P_DEFAULT);
@@ -883,7 +883,7 @@ vrfy_ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks)
char linkname[16];
htri_t link_exists;
- HDsprintf(linkname, "%u", u);
+ HDsnprintf(linkname, sizeof(linkname), "%u", u);
link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
if (link_exists < 0) {
@@ -1321,7 +1321,7 @@ os_grp_n(hid_t fid, const char *group_name, int proc_num, unsigned nlinks)
while ((pass) && (u < nlinks)) {
char linkname[32];
- HDsprintf(linkname, "ln%d_%u", proc_num, u);
+ HDsnprintf(linkname, sizeof(linkname), "ln%d_%u", proc_num, u);
if (0 == (u % 2)) {
ret = H5Lcreate_soft(group_name, gid, linkname, H5P_DEFAULT, H5P_DEFAULT);
@@ -1479,7 +1479,7 @@ vrfy_os_grp_n(hid_t fid, const char *group_name, int proc_num, unsigned nlinks)
char linkname[32];
htri_t link_exists;
- HDsprintf(linkname, "ln%d_%u", proc_num, u);
+ HDsnprintf(linkname, sizeof(linkname), "ln%d_%u", proc_num, u);
link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
if (link_exists < 0) {
@@ -3056,26 +3056,26 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
/* Add & verify an empty "new style" group */
if (pass) {
- HDsprintf(full_path, "%s/A", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/A", base_path);
HDassert(HDstrlen(full_path) < 1024);
ns_grp_0(fid, full_path);
}
if (pass) {
- HDsprintf(full_path, "%s/A", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/A", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_0(fid, full_path);
}
/* Add & verify a compact "new style" group (3 link messages) */
if (pass) {
- HDsprintf(full_path, "%s/B", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/B", base_path);
HDassert(HDstrlen(full_path) < 1024);
ns_grp_c(fid, full_path, 3);
}
if (pass) {
- HDsprintf(full_path, "%s/B", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/B", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_c(fid, full_path, 3);
}
@@ -3084,26 +3084,26 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* fractal heap)
*/
if (pass) {
- HDsprintf(full_path, "%s/C", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/C", base_path);
HDassert(HDstrlen(full_path) < 1024);
ns_grp_d(fid, full_path, 300);
}
if (pass) {
- HDsprintf(full_path, "%s/C", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/C", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_d(fid, full_path, 300);
}
/* Add & verify an empty "old style" group to file */
if (pass) {
- HDsprintf(full_path, "%s/D", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/D", base_path);
HDassert(HDstrlen(full_path) < 1024);
os_grp_0(fid, full_path);
}
if (pass) {
- HDsprintf(full_path, "%s/D", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/D", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_os_grp_0(fid, full_path);
}
@@ -3112,13 +3112,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* local heap) to file
*/
if (pass) {
- HDsprintf(full_path, "%s/E", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/E", base_path);
HDassert(HDstrlen(full_path) < 1024);
os_grp_n(fid, full_path, proc_num, 300);
}
if (pass) {
- HDsprintf(full_path, "%s/E", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/E", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_os_grp_n(fid, full_path, proc_num, 300);
}
@@ -3127,13 +3127,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/F", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/F", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_ctg_i(fid, full_path, FALSE);
}
if (pass) {
- HDsprintf(full_path, "%s/F", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/F", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_i(fid, full_path, FALSE);
}
@@ -3142,13 +3142,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/G", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/G", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_ctg_i(fid, full_path, TRUE);
}
if (pass) {
- HDsprintf(full_path, "%s/G", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/G", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_i(fid, full_path, TRUE);
}
@@ -3157,13 +3157,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/H", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/H", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_chk_i(fid, full_path, FALSE);
}
if (pass) {
- HDsprintf(full_path, "%s/H", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/H", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_chk_i(fid, full_path, FALSE);
}
@@ -3172,13 +3172,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/I", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/I", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_chk_i(fid, full_path, TRUE);
}
if (pass) {
- HDsprintf(full_path, "%s/I", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/I", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_chk_i(fid, full_path, TRUE);
}
@@ -3187,13 +3187,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/J", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/J", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_cpt_i(fid, full_path, FALSE);
}
if (pass) {
- HDsprintf(full_path, "%s/J", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/J", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_cpt_i(fid, full_path, FALSE);
}
@@ -3202,13 +3202,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* to file
*/
if (pass) {
- HDsprintf(full_path, "%s/K", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/K", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_cpt_i(fid, full_path, TRUE);
}
if (pass) {
- HDsprintf(full_path, "%s/K", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/K", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_cpt_i(fid, full_path, TRUE);
}
@@ -3217,13 +3217,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* (but no data) to file
*/
if (pass) {
- HDsprintf(full_path, "%s/L", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/L", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_ctg_v(fid, full_path, FALSE);
}
if (pass) {
- HDsprintf(full_path, "%s/L", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/L", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_v(fid, full_path, FALSE);
}
@@ -3232,13 +3232,13 @@ create_zoo(hid_t fid, const char *base_path, int proc_num)
* (and data) to file
*/
if (pass) {
- HDsprintf(full_path, "%s/M", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/M", base_path);
HDassert(HDstrlen(full_path) < 1024);
ds_ctg_v(fid, full_path, TRUE);
}
if (pass) {
- HDsprintf(full_path, "%s/M", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/M", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_v(fid, full_path, TRUE);
}
@@ -3281,14 +3281,14 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
/* validate an empty "new style" group */
if (pass) {
- HDsprintf(full_path, "%s/A", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/A", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_0(fid, full_path);
}
/* validate a compact "new style" group (3 link messages) */
if (pass) {
- HDsprintf(full_path, "%s/B", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/B", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_c(fid, full_path, 3);
}
@@ -3297,14 +3297,14 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* fractal heap)
*/
if (pass) {
- HDsprintf(full_path, "%s/C", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/C", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ns_grp_d(fid, full_path, 300);
}
/* validate an empty "old style" group in file */
if (pass) {
- HDsprintf(full_path, "%s/D", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/D", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_os_grp_0(fid, full_path);
}
@@ -3313,7 +3313,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* local heap)
*/
if (pass) {
- HDsprintf(full_path, "%s/E", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/E", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_os_grp_n(fid, full_path, proc_num, 300);
}
@@ -3322,7 +3322,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file.
*/
if (pass) {
- HDsprintf(full_path, "%s/F", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/F", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_i(fid, full_path, FALSE);
}
@@ -3331,7 +3331,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file.
*/
if (pass) {
- HDsprintf(full_path, "%s/G", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/G", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_i(fid, full_path, TRUE);
}
@@ -3340,7 +3340,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file
*/
if (pass) {
- HDsprintf(full_path, "%s/H", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/H", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_chk_i(fid, full_path, FALSE);
}
@@ -3349,7 +3349,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file
*/
if (pass) {
- HDsprintf(full_path, "%s/I", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/I", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_chk_i(fid, full_path, TRUE);
}
@@ -3358,7 +3358,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file
*/
if (pass) {
- HDsprintf(full_path, "%s/J", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/J", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_cpt_i(fid, full_path, FALSE);
}
@@ -3367,7 +3367,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* in file
*/
if (pass) {
- HDsprintf(full_path, "%s/K", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/K", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_cpt_i(fid, full_path, TRUE);
}
@@ -3376,7 +3376,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* (but no data) to file
*/
if (pass) {
- HDsprintf(full_path, "%s/L", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/L", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_v(fid, full_path, FALSE);
}
@@ -3385,7 +3385,7 @@ validate_zoo(hid_t fid, const char *base_path, int proc_num)
* (and data) to file
*/
if (pass) {
- HDsprintf(full_path, "%s/M", base_path);
+ HDsnprintf(full_path, sizeof(full_path), "%s/M", base_path);
HDassert(HDstrlen(full_path) < 1024);
vrfy_ds_ctg_v(fid, full_path, TRUE);
}
diff --git a/test/h5test.c b/test/h5test.c
index 0528623..ac15043 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -1773,41 +1773,46 @@ dummy_vfd_write(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, hi
/* Dummy VFD with the minimum parameters to make a VFD that can be registered */
#define DUMMY_VFD_VALUE (H5FD_class_value_t)155
static const H5FD_class_t H5FD_dummy_g = {
- DUMMY_VFD_VALUE, /* value */
- "dummy", /* name */
- 1, /* maxaddr */
- H5F_CLOSE_WEAK, /* fc_degree */
- NULL, /* terminate */
- NULL, /* sb_size */
- NULL, /* sb_encode */
- NULL, /* sb_decode */
- 0, /* fapl_size */
- NULL, /* fapl_get */
- NULL, /* fapl_copy */
- NULL, /* fapl_free */
- 0, /* dxpl_size */
- NULL, /* dxpl_copy */
- NULL, /* dxpl_free */
- dummy_vfd_open, /* open */
- dummy_vfd_close, /* close */
- NULL, /* cmp */
- NULL, /* query */
- NULL, /* get_type_map */
- NULL, /* alloc */
- NULL, /* free */
- dummy_vfd_get_eoa, /* get_eoa */
- dummy_vfd_set_eoa, /* set_eoa */
- dummy_vfd_get_eof, /* get_eof */
- NULL, /* get_handle */
- dummy_vfd_read, /* read */
- dummy_vfd_write, /* write */
- NULL, /* flush */
- NULL, /* truncate */
- NULL, /* lock */
- NULL, /* unlock */
- NULL, /* del */
- NULL, /* ctl */
- H5FD_FLMAP_DICHOTOMY /* fl_map */
+ H5FD_CLASS_VERSION, /* struct version */
+ DUMMY_VFD_VALUE, /* value */
+ "dummy", /* name */
+ 1, /* maxaddr */
+ H5F_CLOSE_WEAK, /* fc_degree */
+ NULL, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ 0, /* fapl_size */
+ NULL, /* fapl_get */
+ NULL, /* fapl_copy */
+ NULL, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ dummy_vfd_open, /* open */
+ dummy_vfd_close, /* close */
+ NULL, /* cmp */
+ NULL, /* query */
+ NULL, /* get_type_map */
+ NULL, /* alloc */
+ NULL, /* free */
+ dummy_vfd_get_eoa, /* get_eoa */
+ dummy_vfd_set_eoa, /* set_eoa */
+ dummy_vfd_get_eof, /* get_eof */
+ NULL, /* get_handle */
+ dummy_vfd_read, /* read */
+ dummy_vfd_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
+ NULL, /* flush */
+ NULL, /* truncate */
+ NULL, /* lock */
+ NULL, /* unlock */
+ NULL, /* del */
+ NULL, /* ctl */
+ H5FD_FLMAP_DICHOTOMY /* fl_map */
};
/*-------------------------------------------------------------------------
diff --git a/test/links.c b/test/links.c
index 2fead45..658f8d6 100644
--- a/test/links.c
+++ b/test/links.c
@@ -2144,6 +2144,7 @@ cklinks_deprec(hid_t fapl, hbool_t new_format)
char linkval[LINK_BUF_SIZE];
char filename[NAME_BUF_SIZE];
herr_t status;
+ htri_t exists;
if (new_format)
TESTING("link queries using deprecated routines (w/new group format)")
@@ -2183,20 +2184,20 @@ cklinks_deprec(hid_t fapl, hbool_t new_format)
FAIL_STACK_ERROR
H5E_BEGIN_TRY
{
- status = H5Lexists(file, "no_grp1/hard", H5P_DEFAULT);
+ exists = H5Lexists(file, "no_grp1/hard", H5P_DEFAULT);
}
H5E_END_TRY;
- if (status >= 0) {
+ if (exists >= 0) {
H5_FAILED();
HDputs(" H5Lexists() should have failed for a path with missing components.");
TEST_ERROR
} /* end if */
H5E_BEGIN_TRY
{
- status = H5Lexists(file, "/no_grp1/hard", H5P_DEFAULT);
+ exists = H5Lexists(file, "/no_grp1/hard", H5P_DEFAULT);
}
H5E_END_TRY;
- if (status >= 0) {
+ if (exists >= 0) {
H5_FAILED();
HDputs(" H5Lexists() should have failed for a path with missing components.");
TEST_ERROR
diff --git a/test/null_vfd_plugin.c b/test/null_vfd_plugin.c
index f41da00..ca59939 100644
--- a/test/null_vfd_plugin.c
+++ b/test/null_vfd_plugin.c
@@ -35,41 +35,46 @@ static herr_t H5FD_null_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr);
static haddr_t H5FD_null_get_eof(const H5FD_t *_file, H5FD_mem_t type);
static const H5FD_class_t H5FD_null_g = {
- NULL_VFD_VALUE, /* value */
- NULL_VFD_NAME, /* name */
- 1, /* maxaddr */
- H5F_CLOSE_WEAK, /* fc_degree */
- NULL, /* terminate */
- NULL, /* sb_size */
- NULL, /* sb_encode */
- NULL, /* sb_decode */
- 0, /* fapl_size */
- NULL, /* fapl_get */
- NULL, /* fapl_copy */
- NULL, /* fapl_free */
- 0, /* dxpl_size */
- NULL, /* dxpl_copy */
- NULL, /* dxpl_free */
- H5FD_null_open, /* open */
- H5FD_null_close, /* close */
- NULL, /* cmp */
- NULL, /* query */
- NULL, /* get_type_map */
- NULL, /* alloc */
- NULL, /* free */
- H5FD_null_get_eoa, /* get_eoa */
- H5FD_null_set_eoa, /* set_eoa */
- H5FD_null_get_eof, /* get_eof */
- NULL, /* get_handle */
- H5FD_null_read, /* read */
- H5FD_null_write, /* write */
- NULL, /* flush */
- NULL, /* truncate */
- NULL, /* lock */
- NULL, /* unlock */
- NULL, /* del */
- NULL, /* ctl */
- H5FD_FLMAP_DICHOTOMY /* fl_map */
+ H5FD_CLASS_VERSION, /* struct version */
+ NULL_VFD_VALUE, /* value */
+ NULL_VFD_NAME, /* name */
+ 1, /* maxaddr */
+ H5F_CLOSE_WEAK, /* fc_degree */
+ NULL, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ 0, /* fapl_size */
+ NULL, /* fapl_get */
+ NULL, /* fapl_copy */
+ NULL, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ H5FD_null_open, /* open */
+ H5FD_null_close, /* close */
+ NULL, /* cmp */
+ NULL, /* query */
+ NULL, /* get_type_map */
+ NULL, /* alloc */
+ NULL, /* free */
+ H5FD_null_get_eoa, /* get_eoa */
+ H5FD_null_set_eoa, /* set_eoa */
+ H5FD_null_get_eof, /* get_eof */
+ NULL, /* get_handle */
+ H5FD_null_read, /* read */
+ H5FD_null_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
+ NULL, /* flush */
+ NULL, /* truncate */
+ NULL, /* lock */
+ NULL, /* unlock */
+ NULL, /* del */
+ NULL, /* ctl */
+ H5FD_FLMAP_DICHOTOMY /* fl_map */
};
static H5FD_t *
diff --git a/test/page_buffer.c b/test/page_buffer.c
index c078ce8..558b4e9 100644
--- a/test/page_buffer.c
+++ b/test/page_buffer.c
@@ -120,28 +120,28 @@ create_file(char *filename, hid_t fcpl, hid_t fapl)
for (i = 0; i < NUM_DSETS; i++) {
- HDsprintf(dset_name, "D1dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", i);
if ((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl,
H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR;
if (H5Dclose(dset_id) < 0)
FAIL_STACK_ERROR;
- HDsprintf(dset_name, "D2dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", i);
if ((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl,
H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR;
if (H5Dclose(dset_id) < 0)
FAIL_STACK_ERROR;
- HDsprintf(dset_name, "D3dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", i);
if ((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl,
H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR;
if (H5Dclose(dset_id) < 0)
FAIL_STACK_ERROR;
- HDsprintf(dset_name, "dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", i);
if ((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl,
H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR;
@@ -166,13 +166,13 @@ create_file(char *filename, hid_t fcpl, hid_t fapl)
}
}
- HDsprintf(dset_name, "D1dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", i);
if (H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
FAIL_STACK_ERROR;
- HDsprintf(dset_name, "D2dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", i);
if (H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
FAIL_STACK_ERROR;
- HDsprintf(dset_name, "D3dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", i);
if (H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
FAIL_STACK_ERROR;
}
@@ -259,7 +259,7 @@ open_file(char *filename, hid_t fapl, hsize_t page_size, size_t page_buffer_size
for (i = 0; i < NUM_DSETS; i++) {
- HDsprintf(dset_name, "dset%d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", i);
if ((dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT)) < 0)
FAIL_STACK_ERROR;
@@ -370,7 +370,7 @@ error:
* 1) verifying that API errors are caught.
*
* 2) verifying that the page buffer behaves more or less
- * as advertized.
+ * as advertised.
*
* Any data mis-matches or unexpected failures or successes
* reported by the HDF5 library result in test failure.
diff --git a/test/pool.c b/test/pool.c
deleted file mode 100644
index c508025..0000000
--- a/test/pool.c
+++ /dev/null
@@ -1,794 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/* Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- */
-#include "h5test.h"
-
-/*
- * This file needs to access private datatypes from the H5MP package.
- * This file also needs to access the memory pool testing code.
- */
-#define H5MP_FRIEND /*suppress error about including H5MPpkg */
-#define H5MP_TESTING
-#include "H5MPpkg.h" /* Memory Pools */
-
-/* Other private headers that this test requires */
-
-/* Local macros */
-#define MPOOL_PAGE_SIZE H5MP_PAGE_SIZE_DEFAULT
-#define MPOOL_FLAGS H5MP_FLG_DEFAULT
-#define MPOOL_NUM_NORMAL_BLOCKS 15
-#define MPOOL_NORMAL_BLOCK 512
-#define MPOOL_LARGE_BLOCK (MPOOL_PAGE_SIZE * 3)
-#define MPOOL_NUM_SMALL_BLOCKS 64
-#define MPOOL_SMALL_BLOCK 1
-#define MPOOL_NUM_RANDOM (10 * 1024)
-#define MPOOL_RANDOM_MAX_SIZE (MPOOL_PAGE_SIZE * 2)
-
-/*-------------------------------------------------------------------------
- * Function: test_create
- *
- * Purpose: Test trivial creating & closing memory pool
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_create(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- H5MP_page_t *page; /* Memory pool page */
- size_t free_size; /* Free size in pool */
-
- /*
- * Test memory pool creation
- */
- TESTING("memory pool creation");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Check free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != 0)
- TEST_ERROR
-
- /* Check first page */
- if (H5MP_get_pool_first_page(mp, &page) < 0)
- TEST_ERROR
- if (page != NULL)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_create() */
-
-/*-------------------------------------------------------------------------
- * Function: test_close_one
- *
- * Purpose: Tests closing pool with one block allocated
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Friday, May 6, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_close_one(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
-
- /*
- * Test memory pool closing
- */
- TESTING("closing pool with blocks still allocated in one page");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- if (NULL == H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK))
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_close_one() */
-
-/*-------------------------------------------------------------------------
- * Function: test_allocate_first
- *
- * Purpose: Tests allocating first block in pool
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_allocate_first(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- H5MP_page_t *page; /* Memory pool page */
- size_t free_size; /* Free size in pool */
- void * spc; /* Pointer to space allocated */
-
- /*
- * Test memory pool allocation
- */
- TESTING("allocating first block in pool");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- if (NULL == (spc = H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE - (H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Get first page */
- if (H5MP_get_pool_first_page(mp, &page) < 0)
- TEST_ERROR
- if (page == NULL)
- TEST_ERROR
-
- /* Check page's free space */
- if (H5MP_get_page_free_size(page, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE - (H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Check next page */
- if (H5MP_get_page_next_page(page, &page) < 0)
- TEST_ERROR
- if (page != NULL)
- TEST_ERROR
-
- /* Free space in pool */
- H5MP_free(mp, spc);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != MPOOL_PAGE_SIZE - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t)))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- TESTING("allocating large first block in pool");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- if (NULL == (spc = H5MP_malloc(mp, (size_t)MPOOL_LARGE_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != 0)
- TEST_ERROR
-
- /* Get first page */
- if (H5MP_get_pool_first_page(mp, &page) < 0)
- TEST_ERROR
- if (page == NULL)
- TEST_ERROR
-
- /* Check page's free space */
- if (H5MP_get_page_free_size(page, &free_size) < 0)
- TEST_ERROR
- if (free_size != 0)
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Check next page */
- if (H5MP_get_page_next_page(page, &page) < 0)
- TEST_ERROR
- if (page != NULL)
- TEST_ERROR
-
- /* Free space in pool */
- H5MP_free(mp, spc);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != MPOOL_LARGE_BLOCK + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_allocate_first() */
-
-/*-------------------------------------------------------------------------
- * Function: test_allocate_split
- *
- * Purpose: Tests allocating block in pool that requires splitting
- * existing block
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_allocate_split(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- size_t free_size; /* Free size in pool */
- void * spc1; /* Pointer to space allocated */
- void * spc2; /* Pointer to space allocated */
-
- /*
- * Test memory pool allocation
- */
- TESTING("splitting block in pool");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- if (NULL == (spc1 = H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE - (H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Allocate more space in pool */
- if (NULL == (spc2 = H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE -
- (((H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t))) * 2) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Free first block in pool */
- H5MP_free(mp, spc1);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE - (H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Free second block in pool (should merge with first block) */
- H5MP_free(mp, spc2);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != MPOOL_PAGE_SIZE - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t)))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_allocate_split() */
-
-/*-------------------------------------------------------------------------
- * Function: test_allocate_many_small
- *
- * Purpose: Tests allocating many small blocks in a pool
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 6, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_allocate_many_small(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- size_t free_size; /* Free size in pool */
- void * spc[MPOOL_NUM_SMALL_BLOCKS]; /* Pointers to space allocated */
- int i; /* Local index variable */
-
- /*
- * Test memory pool allocation
- */
- TESTING("allocating many small blocks");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- for (i = 0; i < MPOOL_NUM_SMALL_BLOCKS; i++)
- if (NULL == (spc[i] = H5MP_malloc(mp, (size_t)MPOOL_SMALL_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != MPOOL_PAGE_SIZE -
- (((H5MP_BLOCK_ALIGN(MPOOL_SMALL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t))) *
- MPOOL_NUM_SMALL_BLOCKS) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Free blocks in pool */
- /* (Tests free block merging with block after it */
- for (i = (MPOOL_NUM_SMALL_BLOCKS - 1); i >= 0; i--)
- H5MP_free(mp, spc[i]);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != MPOOL_PAGE_SIZE - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t)))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_allocate_many_small() */
-
-/*-------------------------------------------------------------------------
- * Function: test_allocate_new_page
- *
- * Purpose: Tests allocating block in pool that requires allocating
- * new page
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Friday, May 6, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_allocate_new_page(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- size_t free_size; /* Free size in pool */
- size_t u; /* Local index variable */
- void * spc[MPOOL_NUM_NORMAL_BLOCKS]; /* Pointer to space allocated */
- void * spc1; /* Pointer to space allocated */
- void * spc2; /* Pointer to space allocated */
-
- /*
- * Test memory pool allocation
- */
- TESTING("allocate normal-sized block in new page");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- for (u = 0; u < MPOOL_NUM_NORMAL_BLOCKS; u++)
- if (NULL == (spc[u] = H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != (MPOOL_PAGE_SIZE * 3) - (((H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t))) *
- MPOOL_NUM_NORMAL_BLOCKS) +
- (H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t)) * 3)))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Free blocks in pool */
- /* (Free alternating blocks, in two passes, which tests block merging w/both neighbors) */
- for (u = 0; u < MPOOL_NUM_NORMAL_BLOCKS; u += 2)
- H5MP_free(mp, spc[u]);
- for (u = 1; u < MPOOL_NUM_NORMAL_BLOCKS; u += 2)
- H5MP_free(mp, spc[u]);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != ((MPOOL_PAGE_SIZE - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))) * 3))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- TESTING("allocate large-sized block in new page");
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space in pool */
- /* (Normal sized block) */
- if (NULL == (spc1 = H5MP_malloc(mp, (size_t)MPOOL_NORMAL_BLOCK)))
- TEST_ERROR
- /* (Larger sized block) */
- if (NULL == (spc2 = H5MP_malloc(mp, (size_t)MPOOL_LARGE_BLOCK)))
- TEST_ERROR
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size !=
- MPOOL_PAGE_SIZE - (H5MP_BLOCK_ALIGN(MPOOL_NORMAL_BLOCK) + H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t)) +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Free blocks in pool */
- H5MP_free(mp, spc1);
- H5MP_free(mp, spc2);
-
- /* Check pool's free space */
- if (H5MP_get_pool_free_size(mp, &free_size) < 0)
- TEST_ERROR
- if (free_size != ((MPOOL_PAGE_SIZE - H5MP_BLOCK_ALIGN(sizeof(H5MP_page_t))) + MPOOL_LARGE_BLOCK +
- H5MP_BLOCK_ALIGN(sizeof(H5MP_page_blk_t))))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- PASSED();
-
- return 0;
-
-error:
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_allocate_new_page() */
-
-/*-------------------------------------------------------------------------
- * Function: test_allocate_random
- *
- * Purpose: Tests allocating random sized blocks in pool
- *
- * Return: Success: 0
- * Failure: 1
- *
- * Programmer: Quincey Koziol
- * Friday, May 6, 2005
- *
- *-------------------------------------------------------------------------
- */
-static int
-test_allocate_random(void)
-{
- H5MP_pool_t *mp; /* Memory pool */
- size_t u; /* Local index variable */
- time_t curr_time; /* Current time, for seeding random number generator */
- size_t * blk_size = NULL; /* Pointer to block sizes */
- void ** spc = NULL; /* Pointer to space allocated */
- size_t swap_idx; /* Location to swap with when shuffling */
- void * swap_ptr; /* Pointer to swap when shuffling */
-
- /*
- * Test memory pool allocation
- */
- TESTING("allocate many random sized blocks");
-
- /* Initialize random number seed */
- curr_time = HDtime(NULL);
-#if 0
-curr_time=1115412944;
-HDfprintf(stderr,"curr_time=%lu\n",(unsigned long)curr_time);
-#endif
- HDsrandom((unsigned)curr_time);
-
- /* Create a memory pool */
- if (NULL == (mp = H5MP_create((size_t)MPOOL_PAGE_SIZE, MPOOL_FLAGS)))
- TEST_ERROR
-
- /* Allocate space for the block sizes */
- if (NULL == (blk_size = (size_t *)HDmalloc(sizeof(size_t) * MPOOL_NUM_RANDOM)))
- TEST_ERROR
-
- /* Allocate space for the block pointers */
- if (NULL == (spc = (void **)HDmalloc(sizeof(void *) * MPOOL_NUM_RANDOM)))
- TEST_ERROR
-
- /* Initialize the block sizes with random values */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++)
- blk_size[u] = (size_t)(HDrandom() % MPOOL_RANDOM_MAX_SIZE) + 1;
-
- /* Allocate space in pool */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++)
- if (NULL == (spc[u] = H5MP_malloc(mp, blk_size[u])))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Shuffle pointers to free */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++) {
- swap_idx = (size_t)(HDrandom() % (int)(MPOOL_NUM_RANDOM - u)) + u;
- swap_ptr = spc[u];
- spc[u] = spc[swap_idx];
- spc[swap_idx] = swap_ptr;
- } /* end for */
-
- /* Free blocks in pool */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++)
- H5MP_free(mp, spc[u]);
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Initialize the block sizes with random values */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++)
- blk_size[u] = (size_t)(HDrandom() % MPOOL_RANDOM_MAX_SIZE) + 1;
-
- /* Allocate space in pool (again) */
- /* (Leave allocated to test closing pool with many blocks still allocated) */
- for (u = 0; u < MPOOL_NUM_RANDOM; u++)
- if (NULL == (spc[u] = H5MP_malloc(mp, blk_size[u])))
- TEST_ERROR
-
- /* Check that free space totals match */
- if (H5MP_pool_is_free_size_correct(mp) <= 0)
- TEST_ERROR
-
- /* Close the memory pool */
- if (H5MP_close(mp) < 0)
- TEST_ERROR
-
- /* Free memory for block sizes & pointers */
- HDfree(blk_size);
- HDfree(spc);
-
- PASSED();
-
- return 0;
-
-error:
- if (blk_size)
- HDfree(blk_size);
- if (spc)
- HDfree(spc);
- H5E_BEGIN_TRY
- {
- if (mp)
- H5MP_close(mp);
- }
- H5E_END_TRY;
-
- return 1;
-} /* test_allocate_random() */
-
-/*-------------------------------------------------------------------------
- * Function: main
- *
- * Purpose: Test the memory pool code
- *
- * Return: Success:
- * Failure:
- *
- * Programmer: Quincey Koziol
- * Tuesday, May 3, 2005
- *
- *-------------------------------------------------------------------------
- */
-int
-main(void)
-{
- int nerrors = 0;
-
- /* Reset library */
- h5_reset();
-
- /* Test memory pool creation */
- nerrors += test_create();
-
- /* Test memory pool space closing */
- nerrors += test_close_one();
-
- /* Test memory pool space allocation */
- nerrors += test_allocate_first();
- nerrors += test_allocate_split();
- nerrors += test_allocate_many_small();
- nerrors += test_allocate_new_page();
- nerrors += test_allocate_random();
-
- if (nerrors)
- goto error;
- HDputs("All memory pool tests passed.");
-
- return 0;
-
-error:
- HDputs("*** TESTS FAILED ***");
- return 1;
-}
diff --git a/test/reserved.c b/test/reserved.c
index f864329..08e747e 100644
--- a/test/reserved.c
+++ b/test/reserved.c
@@ -71,7 +71,7 @@ rsrv_heap(void)
}
H5E_END_TRY
- HDsprintf(dset_name, "Dset %d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "Dset %d", i);
H5E_BEGIN_TRY
{
@@ -115,7 +115,7 @@ rsrv_heap(void)
if (H5open() < 0)
TEST_ERROR;
- HDsprintf(dset_name, "Dset %d", i - 2);
+ HDsnprintf(dset_name, sizeof(dset_name), "Dset %d", i - 2);
file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
if (file_id < 0)
@@ -218,7 +218,7 @@ rsrv_ohdr(void)
} /* end for */
for (i = 0; i < 2000; i++) {
- HDsprintf(attrname, "attr %d", i);
+ HDsnprintf(attrname, sizeof(attrname), "attr %d", i);
H5E_BEGIN_TRY
{
aid = H5Screate_simple(2, dims, NULL);
diff --git a/test/swmr.c b/test/swmr.c
index 693ddc7..dccd945 100644
--- a/test/swmr.c
+++ b/test/swmr.c
@@ -2395,7 +2395,7 @@ error:
* (5) Parent: open a file with write access; enable SWMR writing mode
* Child: concurrent open of the file with write and SWMR write access (fail)
*/
-#ifndef H5_HAVE_UNISTD_H
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
static int
test_start_swmr_write_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t new_format)
@@ -2408,11 +2408,11 @@ test_start_swmr_write_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t new_format)
}
SKIPPED();
- HDputs(" Test skipped due to a lack of unistd.h functionality.");
+ HDputs(" Test skipped due to fork or waitpid not defined.");
return 0;
} /* test_start_swmr_write_concur() */
-#else /* H5_HAVE_UNISTD_H */
+#else /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
static int
test_start_swmr_write_concur(hid_t in_fapl, hbool_t new_format)
@@ -3014,7 +3014,7 @@ error:
return -1;
} /* test_start_swmr_write_concur() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
/*
* test_start_swmr_write_stress_ohdr():
@@ -4809,7 +4809,7 @@ error:
** This is for concurrent access.
**
*****************************************************************/
-#ifndef H5_HAVE_UNISTD_H
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK))
static int
test_file_lock_concur(hid_t H5_ATTR_UNUSED in_fapl)
@@ -4817,12 +4817,12 @@ test_file_lock_concur(hid_t H5_ATTR_UNUSED in_fapl)
/* Output message about test being performed */
TESTING("File open with different combinations of flags--concurrent access");
SKIPPED();
- HDputs(" Test skipped due to a lack of unistd.h functionality.");
+ HDputs(" Test skipped due to fork, waitpid, or flock not defined.");
return 0;
} /* end test_file_lock_concur() */
-#else /* H5_HAVE_UNISTD_H */
+#else /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK)) */
static int
test_file_lock_concur(hid_t in_fapl)
@@ -5192,7 +5192,7 @@ error:
} /* end test_file_lock_concur() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) && defined(H5_HAVE_FLOCK)) */
/****************************************************************
**
@@ -5202,7 +5202,7 @@ error:
** This is for concurrent access.
**
*****************************************************************/
-#ifndef H5_HAVE_UNISTD_H
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
static int
test_file_lock_swmr_concur(hid_t H5_ATTR_UNUSED in_fapl)
@@ -5210,12 +5210,12 @@ test_file_lock_swmr_concur(hid_t H5_ATTR_UNUSED in_fapl)
/* Output message about test being performed */
TESTING("File open with different combintations of flags + SWMR flags--concurrent access");
SKIPPED();
- HDputs(" Test skipped due to a lack of unistd.h functionality.");
+ HDputs(" Test skipped due to fork or waitpid not defined.");
return 0;
} /* end test_file_lock_swmr_concur() */
-#else /* H5_HAVE_UNISTD_H */
+#else /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
static int
test_file_lock_swmr_concur(hid_t in_fapl)
@@ -6215,7 +6215,7 @@ error:
} /* end test_file_lock_swmr_concur() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
/****************************************************************
**
@@ -6227,7 +6227,7 @@ error:
static int
test_file_locking(hid_t in_fapl, hbool_t turn_locking_on, hbool_t env_var_override)
{
-#ifndef H5_HAVE_UNISTD_H
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
if (turn_locking_on && env_var_override)
TESTING("File locking: ON w/ env var override")
else if (turn_locking_on && !env_var_override)
@@ -6237,9 +6237,9 @@ test_file_locking(hid_t in_fapl, hbool_t turn_locking_on, hbool_t env_var_overri
else
TESTING("File locking: OFF")
SKIPPED();
- HDputs(" Test skipped due to a lack of unistd.h functionality.");
+ HDputs(" Test skipped due to fork or waitpid not defined.");
return 0;
-#else /* H5_HAVE_UNISTD_H */
+#else /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
hid_t fid = -1; /* File ID */
hid_t fapl = -1; /* File access property list */
char filename[NAME_BUF_SIZE]; /* file name */
@@ -6405,7 +6405,7 @@ error:
return -1;
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
} /* end test_file_locking() */
@@ -6699,7 +6699,7 @@ error:
* (7) Refresh the dataset
* (8) Verify the dataset's dimension and data are correct
*/
-#ifndef H5_HAVE_UNISTD_H
+#if !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID))
static int
test_refresh_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t new_format)
@@ -6712,11 +6712,11 @@ test_refresh_concur(hid_t H5_ATTR_UNUSED in_fapl, hbool_t new_format)
}
SKIPPED();
- HDputs(" Test skipped due to a lack of unistd.h functionality.");
+ HDputs(" Test skipped due to fork or waitpid not defined.");
return 0;
} /* test_refresh_concur() */
-#else /* H5_HAVE_UNISTD_H */
+#else /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
static int
test_refresh_concur(hid_t in_fapl, hbool_t new_format)
@@ -7016,7 +7016,7 @@ error:
return -1;
} /* test_refresh_concur() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* !(defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)) */
/*
* test_multiple_same():
diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c
index d39a698..c705d6b 100644
--- a/test/swmr_addrem_writer.c
+++ b/test/swmr_addrem_writer.c
@@ -283,7 +283,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid; /* File ID for file opened */
long nops = 0; /* # of times to grow or shrink the dataset */
diff --git a/test/swmr_common.c b/test/swmr_common.c
index 2201427..b359bc6 100644
--- a/test/swmr_common.c
+++ b/test/swmr_common.c
@@ -202,13 +202,12 @@ generate_symbols(void)
unsigned u, v; /* Local index variables */
for (u = 0; u < NLEVELS; u++) {
- symbol_info[u] = (symbol_info_t *)HDmalloc(symbol_count[u] * sizeof(symbol_info_t));
+ symbol_info[u] = HDmalloc(symbol_count[u] * sizeof(symbol_info_t));
for (v = 0; v < symbol_count[u]; v++) {
char name_buf[64];
generate_name(name_buf, u, v);
- symbol_info[u][v].name = (char *)HDmalloc(HDstrlen(name_buf) + 1);
- HDstrcpy(symbol_info[u][v].name, name_buf);
+ symbol_info[u][v].name = HDstrdup(name_buf);
symbol_info[u][v].dsid = -1;
symbol_info[u][v].nrecords = 0;
} /* end for */
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
index e1423b6..93cfa0f 100644
--- a/test/swmr_generator.c
+++ b/test/swmr_generator.c
@@ -258,7 +258,7 @@ usage(void)
} /* end usage() */
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int comp_level = -1; /* Compression level (-1 is no compression) */
hbool_t verbose = TRUE; /* Whether to emit some informational messages */
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
index 836e1cc..db3eba0 100644
--- a/test/swmr_reader.c
+++ b/test/swmr_reader.c
@@ -275,7 +275,7 @@ read_records(const char *filename, hbool_t verbose, FILE *verbose_file, unsigned
if ((fapl = h5_fileaccess()) < 0)
return -1;
- /* Log I/O when verbose output it enbabled */
+ /* Log I/O when verbose output it enabled */
if (use_log_vfd) {
char verbose_name[1024];
@@ -387,7 +387,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
long nseconds = 0; /* # of seconds to test */
int poll_time = 1; /* # of seconds between polling */
diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c
index 910bc74..94cb7f8 100644
--- a/test/swmr_remove_reader.c
+++ b/test/swmr_remove_reader.c
@@ -371,7 +371,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
long nseconds = 0; /* # of seconds to test */
int poll_time = 1; /* # of seconds between polling */
diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c
index e52f4e2..3604935 100644
--- a/test/swmr_remove_writer.c
+++ b/test/swmr_remove_writer.c
@@ -217,7 +217,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid; /* File ID for file opened */
long nshrinks = 0; /* # of times to shrink the dataset */
diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c
index 00eb2e0..8f1c781 100644
--- a/test/swmr_sparse_reader.c
+++ b/test/swmr_sparse_reader.c
@@ -117,7 +117,7 @@ check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t
(uintmax_t)start[0], (uintmax_t)start[1]);
/* Read record from dataset */
- record->rec_id = (uint64_t)ULLONG_MAX;
+ record->rec_id = UINT64_MAX;
if (H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
return -1;
@@ -342,7 +342,7 @@ usage(void)
} /* end usage() */
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
long nrecords = 0; /* # of records to read */
int poll_time = 1; /* # of seconds to sleep when waiting for writer */
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
index a4fc93e..96eff6c 100644
--- a/test/swmr_sparse_writer.c
+++ b/test/swmr_sparse_writer.c
@@ -318,7 +318,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid; /* File ID for file opened */
long nrecords = 0; /* # of records to append */
diff --git a/test/swmr_start_write.c b/test/swmr_start_write.c
index 3ddd57a..d046b40 100644
--- a/test/swmr_start_write.c
+++ b/test/swmr_start_write.c
@@ -352,7 +352,7 @@ usage(void)
*H5Fstart_swmr_write(), add_records(), H5Fclose().
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid; /* File ID for file opened */
long nrecords = 0; /* # of records to append */
diff --git a/test/swmr_writer.c b/test/swmr_writer.c
index de8b054..c4178f0 100644
--- a/test/swmr_writer.c
+++ b/test/swmr_writer.c
@@ -275,7 +275,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid; /* File ID for file opened */
long nrecords = 0; /* # of records to append */
diff --git a/test/tattr.c b/test/tattr.c
index d66fcc3..2859f4c 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -2094,7 +2094,7 @@ test_attr_dense_verify(hid_t loc_id, unsigned max_attr)
/* Re-open all the attributes by name and verify the data */
for (u = 0; u < max_attr; u++) {
/* Open attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Aopen(loc_id, attrname, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Aopen");
@@ -2119,7 +2119,7 @@ test_attr_dense_verify(hid_t loc_id, unsigned max_attr)
CHECK(attr, FAIL, "H5Aopen_by_idx");
/* Verify Name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, check_name);
VERIFY(name_len, HDstrlen(attrname), "H5Aget_name");
if (HDstrcmp(check_name, attrname) != 0)
@@ -2219,7 +2219,7 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl)
/* Add attributes, until just before converting to dense storage */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -2238,7 +2238,7 @@ test_attr_dense_create(hid_t fcpl, hid_t fapl)
/* Add one more attribute, to push into "dense" storage */
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -2361,7 +2361,7 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl)
/* Add attributes, until just before converting to dense storage */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -2384,7 +2384,7 @@ test_attr_dense_open(hid_t fcpl, hid_t fapl)
/* Add one more attribute, to push into "dense" storage */
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -2523,7 +2523,7 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl)
/* Add attributes, until well into dense storage */
for (u = 0; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -2568,7 +2568,7 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl)
/* Delete attributes until the attributes revert to compact storage again */
for (u--; u >= min_dense; u--) {
/* Delete attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -2582,7 +2582,7 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl)
VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
/* Delete one more attribute, which should cause reversion to compact storage */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -2595,7 +2595,7 @@ test_attr_dense_delete(hid_t fcpl, hid_t fapl)
CHECK(ret, FAIL, "test_attr_dense_verify");
/* Delete another attribute, to verify deletion in compact storage */
- HDsprintf(attrname, "attr %02u", (u - 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u - 1));
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -2724,7 +2724,7 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl)
/* Add attributes, until well into dense storage */
for (u = 0; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
@@ -2790,7 +2790,7 @@ test_attr_dense_rename(hid_t fcpl, hid_t fapl)
unsigned value; /* Attribute value */
/* Open attribute */
- HDsprintf(attrname, "new attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "new attr %02u", u);
attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
CHECK(attr, H5I_INVALID_HID, "H5Aopen");
@@ -2914,7 +2914,7 @@ test_attr_dense_unlink(hid_t fcpl, hid_t fapl)
/* Add attributes, until well into dense storage */
for (u = 0; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3057,7 +3057,7 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 0;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3077,7 +3077,7 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 1;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3107,7 +3107,7 @@ test_attr_dense_limits(hid_t fcpl, hid_t fapl)
/* Delete attribute */
u = 0;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -3216,7 +3216,7 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl)
/* Add attributes, until just before converting to dense storage */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3239,7 +3239,7 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl)
CHECK(sid2, FAIL, "H5Screate_simple");
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3522,7 +3522,7 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl)
/* Delete a few attributes until the storage switches to compact */
for (u = min_dense - 1; u <= max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
add_attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(add_attr, FAIL, "H5Acreate2");
@@ -3590,7 +3590,7 @@ test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl)
/* Add attributes, until just before converting to dense storage */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(gid1, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3756,7 +3756,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 0;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3774,7 +3774,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 1;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3792,7 +3792,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 2;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT);
if (low == H5F_LIBVER_LATEST) {
CHECK(attr, FAIL, "H5Acreate2");
@@ -3815,7 +3815,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Create attribute */
u = 3;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -3837,7 +3837,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Delete attribute */
u = 1;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -3851,7 +3851,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Delete attribute */
u = 3;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -3865,7 +3865,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Delete attribute */
u = 2;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -3879,7 +3879,7 @@ test_attr_big(hid_t fcpl, hid_t fapl)
/* Delete attribute */
u = 0;
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -4278,7 +4278,7 @@ test_attr_many(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create many attributes */
for (u = 0; u < nattr; u++) {
- HDsprintf(attrname, "a-%06u", u);
+ HDsnprintf(attrname, sizeof(attrname), "a-%06u", u);
exists = H5Aexists(gid, attrname);
VERIFY(exists, FALSE, "H5Aexists");
@@ -4330,7 +4330,7 @@ test_attr_many(hbool_t new_format, hid_t fcpl, hid_t fapl)
for (u = 0; u < nattr; u++) {
unsigned value; /* Attribute value */
- HDsprintf(attrname, "a-%06u", u);
+ HDsnprintf(attrname, sizeof(attrname), "a-%06u", u);
exists = H5Aexists(gid, attrname);
VERIFY(exists, TRUE, "H5Aexists");
@@ -4578,7 +4578,7 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl)
/* Create several attributes, but keep storage in compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -4667,7 +4667,7 @@ test_attr_corder_create_compact(hid_t fcpl, hid_t fapl)
H5A_info_t ainfo; /* Attribute information */
/* Retrieve information for attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Aget_info_by_name");
@@ -4782,7 +4782,7 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl)
/* Create several attributes, but keep storage in compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -4805,7 +4805,7 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl)
} /* end for */
/* Create another attribute, to push into dense storage */
- HDsprintf(attrname, "attr %02u", max_compact);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -4898,7 +4898,7 @@ test_attr_corder_create_dense(hid_t fcpl, hid_t fapl)
H5A_info_t ainfo; /* Attribute information */
/* Retrieve information for attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Aget_info_by_name");
@@ -5171,7 +5171,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Create several attributes, but keep storage in compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5194,7 +5194,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
} /* end for */
/* Create another attribute, to push into dense storage */
- HDsprintf(attrname, "attr %02u", max_compact);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5222,7 +5222,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Delete several attributes from object, until attribute storage resumes compact form */
for (u = max_compact; u >= min_dense; u--) {
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -5242,7 +5242,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
} /* end for */
/* Delete another attribute, to push attribute storage into compact form */
- HDsprintf(attrname, "attr %02u", (min_dense - 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1));
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -5258,7 +5258,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Re-add attributes to get back into dense form */
for (u = (min_dense - 1); u < (max_compact + 1); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5345,7 +5345,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Delete several attributes from object, until attribute storage resumes compact form */
for (u = max_compact; u >= min_dense; u--) {
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -5365,7 +5365,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
} /* end for */
/* Delete another attribute, to push attribute storage into compact form */
- HDsprintf(attrname, "attr %02u", (min_dense - 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1));
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
@@ -5381,7 +5381,7 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Re-add attributes to get back into dense form */
for (u = (min_dense - 1); u < (max_compact + 1); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5410,11 +5410,11 @@ test_attr_corder_transition(hid_t fcpl, hid_t fapl)
/* Delete all attributes */
for (u = max_compact; u > 0; u--) {
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
} /* end for */
- HDsprintf(attrname, "attr %02u", 0);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", 0);
ret = H5Adelete(my_dataset, attrname);
CHECK(ret, FAIL, "H5Adelete");
} /* end for */
@@ -5554,7 +5554,7 @@ test_attr_corder_delete(hid_t fcpl, hid_t fapl)
/* Create attributes, until attribute storage is in dense form */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5897,7 +5897,7 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -5937,7 +5937,7 @@ test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -6381,7 +6381,7 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -6456,9 +6456,9 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
if (order == H5_ITER_INC)
- HDsprintf(attrname, "attr %02u", (u + 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1));
else
- HDsprintf(attrname, "attr %02u", (max_compact - (u + 2)));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (max_compact - (u + 2)));
ret = HDstrcmp(attrname, tmpname);
VERIFY(ret, 0, "H5Aget_name_by_idx");
} /* end for */
@@ -6494,7 +6494,7 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (u = 0; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -6584,9 +6584,10 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
if (order == H5_ITER_INC)
- HDsprintf(attrname, "attr %02u", (u + 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1));
else
- HDsprintf(attrname, "attr %02u", ((max_compact * 2) - (u + 2)));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - (u + 2)));
ret = HDstrcmp(attrname, tmpname);
VERIFY(ret, 0, "H5Aget_name_by_idx");
} /* end for */
@@ -6628,7 +6629,7 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, to push into dense form */
for (u = 0; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -6697,9 +6698,10 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u,
tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
if (order == H5_ITER_INC)
- HDsprintf(attrname, "attr %02u", ((u * 2) + 1));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 1));
else
- HDsprintf(attrname, "attr %02u", ((max_compact * 2) - ((u * 2) + 2)));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - ((u * 2) + 2)));
ret = HDstrcmp(attrname, tmpname);
VERIFY(ret, 0, "H5Aget_name_by_idx");
} /* end for */
@@ -6749,9 +6751,10 @@ test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
if (order == H5_ITER_INC)
- HDsprintf(attrname, "attr %02u", ((u * 2) + 3));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 3));
else
- HDsprintf(attrname, "attr %02u", ((max_compact * 2) - ((u * 2) + 4)));
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - ((u * 2) + 4)));
ret = HDstrcmp(attrname, tmpname);
VERIFY(ret, 0, "H5Aget_name_by_idx");
} /* end for */
@@ -6832,7 +6835,7 @@ attr_iterate2_cb(hid_t loc_id, const char *attr_name, const H5A_info_t *info, vo
} /* end if */
/* Verify name of link */
- HDsprintf(attrname, "attr %02u", (unsigned)my_info.corder);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (unsigned)my_info.corder);
if (HDstrcmp(attr_name, attrname) != 0)
return (H5_ITER_ERROR);
@@ -7342,7 +7345,7 @@ test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -7414,7 +7417,7 @@ test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (u = max_compact; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -7706,7 +7709,7 @@ test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -7765,7 +7768,7 @@ test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (u = max_compact; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr =
H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -7870,7 +7873,7 @@ attr_open_check(hid_t fid, const char *dsetname, hid_t obj_id, unsigned max_attr
/* Open each attribute on object by index and check that it's the correct one */
for (u = 0; u < max_attrs; u++) {
/* Open the attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr_id = H5Aopen(obj_id, attrname, H5P_DEFAULT);
CHECK(attr_id, FAIL, "H5Aopen");
@@ -8039,7 +8042,7 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -8105,7 +8108,7 @@ test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (u = max_compact; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate2");
@@ -8288,7 +8291,7 @@ test_attr_create_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create attributes, up to limit of compact form */
for (u = 0; u < max_compact; u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT,
H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate_by_name");
@@ -8345,7 +8348,7 @@ test_attr_create_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
/* Create more attributes, to push into dense form */
for (u = max_compact; u < (max_compact * 2); u++) {
/* Create attribute */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT,
H5P_DEFAULT, H5P_DEFAULT);
CHECK(attr, FAIL, "H5Acreate_by_name");
@@ -8570,7 +8573,7 @@ test_attr_shared_write(hid_t fcpl, hid_t fapl)
/* Add attributes to each dataset, until after converting to dense storage */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Alternate between creating "small" & "big" attributes */
if (u % 2) {
@@ -8907,7 +8910,7 @@ test_attr_shared_rename(hid_t fcpl, hid_t fapl)
/* Add attributes to each dataset, until after converting to dense storage */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Alternate between creating "small" & "big" attributes */
if (u % 2) {
@@ -9354,7 +9357,7 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl)
/* Add attributes to each dataset, until after converting to dense storage */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Alternate between creating "small" & "big" attributes */
if (u % 2) {
@@ -9462,7 +9465,7 @@ test_attr_shared_delete(hid_t fcpl, hid_t fapl)
/* Delete attributes from second dataset */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Delete second dataset's attribute */
ret = H5Adelete_by_name(fid, DSET2_NAME, attrname, H5P_DEFAULT);
@@ -9726,7 +9729,7 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
/* Add attributes to each dataset, until after converting to dense storage */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Alternate between creating "small" & "big" attributes */
if (u % 2) {
@@ -9850,7 +9853,7 @@ test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
/* Check ref count on attributes of first dataset */
for (u = 0; u < max_compact * 2; u++) {
/* Create attribute name */
- HDsprintf(attrname, "attr %02u", u);
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
/* Open attribute on first dataset */
attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
diff --git a/test/testabort_fail.sh.in b/test/test_abort_fail.sh.in
index a866f9e..a866f9e 100644
--- a/test/testabort_fail.sh.in
+++ b/test/test_abort_fail.sh.in
diff --git a/test/testcheck_version.sh.in b/test/test_check_version.sh.in
index 273702e..273702e 100644
--- a/test/testcheck_version.sh.in
+++ b/test/test_check_version.sh.in
diff --git a/test/testerror.sh.in b/test/test_error.sh.in
index 4fb2a81..4fb2a81 100644
--- a/test/testerror.sh.in
+++ b/test/test_error.sh.in
diff --git a/test/testexternal_env.sh.in b/test/test_external_env.sh.in
index 94fbb88..94fbb88 100644
--- a/test/testexternal_env.sh.in
+++ b/test/test_external_env.sh.in
diff --git a/test/test_filter_plugin.sh.in b/test/test_filter_plugin.sh.in
deleted file mode 100644
index 78cdb3a..0000000
--- a/test/test_filter_plugin.sh.in
+++ /dev/null
@@ -1,113 +0,0 @@
-#! /bin/sh
-#
-# Copyright by The HDF Group.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-srcdir=@srcdir@
-TOP_BUILDDIR=@top_builddir@
-
-# Determine if backward compatibility options enabled
-DEPRECATED_SYMBOLS="@DEPRECATED_SYMBOLS@"
-
-EXIT_SUCCESS=0
-EXIT_FAILURE=1
-
-nerrors=0
-verbose=yes
-exit_code=$EXIT_SUCCESS
-
-TEST_NAME=filter_plugin
-TEST_BIN=`pwd`/$TEST_NAME
-FROM_DIR=`pwd`/.libs
-case $(uname) in
- CYGWIN* )
- PLUGINS_FOR_DIR1="$FROM_DIR/cygfilter_plugin1* $FROM_DIR/cygfilter_plugin3*"
- PLUGINS_FOR_DIR2="$FROM_DIR/cygfilter_plugin2* $FROM_DIR/cygfilter_plugin4*"
- ;;
- *)
- PLUGINS_FOR_DIR1="$FROM_DIR/libfilter_plugin1* $FROM_DIR/libfilter_plugin3*"
- PLUGINS_FOR_DIR2="$FROM_DIR/libfilter_plugin2* $FROM_DIR/libfilter_plugin4*"
- ;;
-esac
-PLUGIN_DIR1=filter_plugin_dir1
-PLUGIN_DIR2=filter_plugin_dir2
-CP="cp -p" # Use -p to preserve mode,ownership,timestamps
-RM="rm -rf"
-
-# Print a line-line message left justified in a field of 70 characters
-# beginning with the word "Testing".
-#
-TESTING() {
- SPACES=" "
- echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
-}
-
-# Main Body
-# Create test directories if necessary.
-test -d $PLUGIN_DIR1 || mkdir -p $PLUGIN_DIR1
-if [ $? != 0 ]; then
- echo "Failed to create filter plugin test directory ($PLUGIN_DIR1)"
- exit $EXIT_FAILURE
-fi
-
-test -d $PLUGIN_DIR2 || mkdir -p $PLUGIN_DIR2
-if [ $? != 0 ]; then
- echo "Failed to create filter plugin test directory ($PLUGIN_DIR2)"
- exit $EXIT_FAILURE
-fi
-
-# Copy plugins for the tests.
-$CP $PLUGINS_FOR_DIR1 $PLUGIN_DIR1
-if [ $? != 0 ]; then
- echo "Failed to copy filter plugins ($PLUGINS_FOR_DIR1) to test directory."
- exit $EXIT_FAILURE
-fi
-
-$CP $PLUGINS_FOR_DIR2 $PLUGIN_DIR2
-if [ $? != 0 ]; then
- echo "Failed to copy filter plugins ($PLUGINS_FOR_DIR2) to test directory."
- exit $EXIT_FAILURE
-fi
-
-# setup plugin path
-ENVCMD="env HDF5_PLUGIN_PATH=${PLUGIN_DIR1}:${PLUGIN_DIR2}"
-
-# Run the test
-$ENVCMD $TEST_BIN
-if [ $? != 0 ]; then
- nerrors=`expr $nerrors + 1`
-fi
-
-############################################
-# HDFFV-9655 test for relative path disabled
-# setup filter plugin path relative to test
-# actual executable is in the .libs folder
-#ENVCMD="env HDF5_PLUGIN_PATH=@/../${PLUGIN_DIR1}:@/../${PLUGIN_DIR2}"
-#
-# Run the test
-#$ENVCMD $TEST_BIN
-#if [ $? != 0 ]; then
-# nerrors=`expr $nerrors + 1`
-#fi
-#############################################
-
-# print results
-if test $nerrors -ne 0 ; then
- echo "$nerrors errors encountered"
- exit_code=$EXIT_FAILURE
-else
- echo "All filter plugin tests passed."
- exit_code=$EXIT_SUCCESS
-fi
-
-# Clean up temporary files/directories and leave
-$RM $PLUGIN_DIR1 $PLUGIN_DIR2
-
-exit $exit_code
diff --git a/test/testflushrefresh.sh.in b/test/test_flush_refresh.sh.in
index 209d370..209d370 100644
--- a/test/testflushrefresh.sh.in
+++ b/test/test_flush_refresh.sh.in
diff --git a/test/testlibinfo.sh.in b/test/test_libinfo.sh.in
index d9fee48..d9fee48 100644
--- a/test/testlibinfo.sh.in
+++ b/test/test_libinfo.sh.in
diff --git a/test/testlinks_env.sh.in b/test/test_links_env.sh.in
index 09074c3..09074c3 100644
--- a/test/testlinks_env.sh.in
+++ b/test/test_links_env.sh.in
diff --git a/test/test_plugin.sh.in b/test/test_plugin.sh.in
new file mode 100644
index 0000000..d958b4b
--- /dev/null
+++ b/test/test_plugin.sh.in
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+# This shell script is for testing filter, VFD, and VOL plugins.
+#
+srcdir=@srcdir@
+TOP_BUILDDIR=@top_builddir@
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+CP="cp -p" # Use -p to preserve mode, ownership, timestamps
+RM="rm -rf"
+
+nerrors=0
+verbose=yes
+exit_code=$EXIT_SUCCESS
+
+# Test binary names
+FILTER_TEST_NAME=filter_plugin
+FILTER_TEST_BIN=`pwd`/$FILTER_TEST_NAME
+
+VFD_TEST_NAME=vfd_plugin
+VFD_TEST_BIN=`pwd`/$VFD_TEST_NAME
+
+VOL_TEST_NAME=vol_plugin
+VOL_TEST_BIN=`pwd`/$VOL_TEST_NAME
+
+# Paths to actual plugins ("libraries" in test directory are just stubs)
+FROM_DIR=`pwd`/.libs
+case $(uname) in
+ CYGWIN* )
+ NULL_VFD_PLUGIN="$FROM_DIR/cygnull_vfd_plugin*"
+ NULL_VOL_PLUGIN="$FROM_DIR/cygnull_vol_connector*"
+ PLUGINS_FOR_DIR1="$FROM_DIR/cygfilter_plugin1* $FROM_DIR/cygfilter_plugin3*"
+ PLUGINS_FOR_DIR2="$FROM_DIR/cygfilter_plugin2* $FROM_DIR/cygfilter_plugin4*"
+ ;;
+ *)
+ NULL_VFD_PLUGIN="$FROM_DIR/libnull_vfd_plugin*"
+ NULL_VOL_PLUGIN="$FROM_DIR/libnull_vol_connector*"
+ PLUGINS_FOR_DIR1="$FROM_DIR/libfilter_plugin1* $FROM_DIR/libfilter_plugin3*"
+ PLUGINS_FOR_DIR2="$FROM_DIR/libfilter_plugin2* $FROM_DIR/libfilter_plugin4*"
+ ;;
+esac
+
+# Directories where we'll copy plugins
+TEMP_PLUGIN_DIR=temp_plugins
+TEMP_FILTER_DIR1=temp_filter_plugin_dir1
+TEMP_FILTER_DIR2=temp_filter_plugin_dir2
+
+# Function to print a one-line message left justified in a field of
+# 70 characters beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+#############
+# Main Body #
+#############
+
+# Create plugin directories
+test -d $TEMP_PLUGIN_DIR || mkdir -p $TEMP_PLUGIN_DIR
+if [ $? != 0 ]; then
+ echo "Failed to create plugin test directory ($TEMP_PLUGIN_DIR)"
+ exit $EXIT_FAILURE
+fi
+test -d $TEMP_FILTER_DIR1 || mkdir -p $TEMP_FILTER_DIR1
+if [ $? != 0 ]; then
+ echo "Failed to create filter plugin test directory ($TEMP_FILTER_DIR1)"
+ exit $EXIT_FAILURE
+fi
+test -d $TEMP_FILTER_DIR2 || mkdir -p $TEMP_FILTER_DIR2
+if [ $? != 0 ]; then
+ echo "Failed to create filter plugin test directory ($TEMP_FILTER_DIR2)"
+ exit $EXIT_FAILURE
+fi
+
+# Copy plugins for the tests
+$CP $NULL_VFD_PLUGIN $TEMP_PLUGIN_DIR
+if [ $? != 0 ]; then
+ echo "Failed to copy NULL VFD plugin ($NULL_VFD_PLUGIN) to test directory."
+ exit $EXIT_FAILURE
+fi
+$CP $NULL_VOL_PLUGIN $TEMP_PLUGIN_DIR
+if [ $? != 0 ]; then
+ echo "Failed to copy NULL VOL plugin ($NULL_VOL_PLUGIN) to test directory."
+ exit $EXIT_FAILURE
+fi
+$CP $PLUGINS_FOR_DIR1 $TEMP_FILTER_DIR1
+if [ $? != 0 ]; then
+ echo "Failed to copy filter plugins ($PLUGINS_FOR_DIR1) to test directory."
+ exit $EXIT_FAILURE
+fi
+$CP $PLUGINS_FOR_DIR2 $TEMP_FILTER_DIR2
+if [ $? != 0 ]; then
+ echo "Failed to copy filter plugins ($PLUGINS_FOR_DIR2) to test directory."
+ exit $EXIT_FAILURE
+fi
+
+# Set plugin path
+ENVCMD="env HDF5_PLUGIN_PATH=${TEMP_PLUGIN_DIR}:${TEMP_FILTER_DIR1}:${TEMP_FILTER_DIR2}"
+
+# Run the tests
+$ENVCMD $FILTER_TEST_BIN
+if [ $? != 0 ]; then
+ nerrors=`expr $nerrors + 1`
+fi
+$ENVCMD $VFD_TEST_BIN
+if [ $? != 0 ]; then
+ nerrors=`expr $nerrors + 1`
+fi
+$ENVCMD $VOL_TEST_BIN
+if [ $? != 0 ]; then
+ nerrors=`expr $nerrors + 1`
+fi
+
+# Print results
+if test $nerrors -ne 0 ; then
+ echo "$nerrors errors encountered"
+ exit_code=$EXIT_FAILURE
+else
+ echo "All plugin tests passed."
+ exit_code=$EXIT_SUCCESS
+fi
+
+# Clean up temporary files/directories and leave
+$RM $TEMP_PLUGIN_DIR $TEMP_FILTER_DIR1 $TEMP_FILTER_DIR2
+
+exit $exit_code
diff --git a/test/testswmr.pwsh.in b/test/test_swmr.pwsh.in
index de7a57a..de7a57a 100644
--- a/test/testswmr.pwsh.in
+++ b/test/test_swmr.pwsh.in
diff --git a/test/testswmr.sh.in b/test/test_swmr.sh.in
index 771fe4a..771fe4a 100644
--- a/test/testswmr.sh.in
+++ b/test/test_swmr.sh.in
diff --git a/test/test_usecases.sh.in b/test/test_use_cases.sh.in
index fd30afd..fd30afd 100644
--- a/test/test_usecases.sh.in
+++ b/test/test_use_cases.sh.in
diff --git a/test/testvds_env.sh.in b/test/test_vds_env.sh.in
index 39f13a5..39f13a5 100644
--- a/test/testvds_env.sh.in
+++ b/test/test_vds_env.sh.in
diff --git a/test/testvdsswmr.pwsh.in b/test/test_vds_swmr.pwsh.in
index bf5aabb..bf5aabb 100644
--- a/test/testvdsswmr.pwsh.in
+++ b/test/test_vds_swmr.pwsh.in
diff --git a/test/testvdsswmr.sh.in b/test/test_vds_swmr.sh.in
index 399fdef..399fdef 100644
--- a/test/testvdsswmr.sh.in
+++ b/test/test_vds_swmr.sh.in
diff --git a/test/test_vol_plugin.sh.in b/test/test_vol_plugin.sh.in
deleted file mode 100644
index d31646b..0000000
--- a/test/test_vol_plugin.sh.in
+++ /dev/null
@@ -1,84 +0,0 @@
-#! /bin/sh
-#
-# Copyright by The HDF Group.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the COPYING file, which can be found at the root of the source code
-# distribution tree, or in https://www.hdfgroup.org/licenses.
-# If you do not have access to either file, you may request a copy from
-# help@hdfgroup.org.
-#
-# This shell script is for testing VOL connector plugins.
-#
-srcdir=@srcdir@
-TOP_BUILDDIR=@top_builddir@
-
-EXIT_SUCCESS=0
-EXIT_FAILURE=1
-
-nerrors=0
-verbose=yes
-exit_code=$EXIT_SUCCESS
-
-TEST_NAME=vol_plugin
-TEST_BIN=`pwd`/$TEST_NAME
-FROM_DIR=`pwd`/.libs
-case $(uname) in
- CYGWIN* )
- NULL_VOL_PLUGIN="$FROM_DIR/cygnull_vol_connector*"
- ;;
- *)
- NULL_VOL_PLUGIN="$FROM_DIR/libnull_vol_connector*"
- ;;
-esac
-TEMP_PLUGIN_DIR=null_vol_plugin_dir
-CP="cp -p" # Use -p to preserve mode,ownership, timestamps
-RM="rm -rf"
-
-# Print a line-line message left justified in a field of 70 characters
-# beginning with the word "Testing".
-#
-TESTING() {
- SPACES=" "
- echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
-}
-
-# Main Body
-# Create test directory if necessary.
-test -d $TEMP_PLUGIN_DIR || mkdir -p $TEMP_PLUGIN_DIR
-if [ $? != 0 ]; then
- echo "Failed to create VOL connector plugin test directory ($TEMP_PLUGIN_DIR)"
- exit $EXIT_FAILURE
-fi
-
-# Copy plugin for the tests.
-$CP $NULL_VOL_PLUGIN $TEMP_PLUGIN_DIR
-if [ $? != 0 ]; then
- echo "Failed to copy NULL VOL plugin ($NULL_VOL_PLUGIN) to test directory."
- exit $EXIT_FAILURE
-fi
-
-# setup plugin path
-ENVCMD="env HDF5_PLUGIN_PATH=${TEMP_PLUGIN_DIR}"
-
-# Run the test
-$ENVCMD $TEST_BIN
-if [ $? != 0 ]; then
- nerrors=`expr $nerrors + 1`
-fi
-
-# print results
-if test $nerrors -ne 0 ; then
- echo "$nerrors errors encountered"
- exit_code=$EXIT_FAILURE
-else
- echo "All VOL plugin tests passed."
- exit_code=$EXIT_SUCCESS
-fi
-
-# Clean up temporary files/directories and leave
-$RM $TEMP_PLUGIN_DIR
-
-exit $exit_code
diff --git a/test/testhdf5.c b/test/testhdf5.c
index dc5f0aa..b5db71b 100644
--- a/test/testhdf5.c
+++ b/test/testhdf5.c
@@ -45,7 +45,6 @@ main(int argc, char *argv[])
AddTest("config", test_configure, cleanup_configure, "Configure definitions", NULL);
AddTest("metadata", test_metadata, cleanup_metadata, "Encoding/decoding metadata", NULL);
AddTest("checksum", test_checksum, cleanup_checksum, "Checksum algorithm", NULL);
- AddTest("heap", test_heap, NULL, "Memory Heaps", NULL);
AddTest("skiplist", test_skiplist, NULL, "Skip Lists", NULL);
AddTest("refstr", test_refstr, NULL, "Reference Counted Strings", NULL);
AddTest("file", test_file, cleanup_file, "Low-Level File I/O", NULL);
diff --git a/test/testhdf5.h b/test/testhdf5.h
index ba5fa71..c5b8246 100644
--- a/test/testhdf5.h
+++ b/test/testhdf5.h
@@ -203,7 +203,6 @@ extern "C" {
/* Prototypes for the test routines */
void test_metadata(void);
void test_checksum(void);
-void test_heap(void);
void test_refstr(void);
void test_file(void);
void test_h5o(void);
diff --git a/test/theap.c b/test/theap.c
deleted file mode 100644
index 9d5787b..0000000
--- a/test/theap.c
+++ /dev/null
@@ -1,1081 +0,0 @@
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Copyright by The HDF Group. *
- * Copyright by the Board of Trustees of the University of Illinois. *
- * All rights reserved. *
- * *
- * This file is part of HDF5. The full HDF5 copyright notice, including *
- * terms governing use, modification, and redistribution, is contained in *
- * the COPYING file, which can be found at the root of the source code *
- * distribution tree, or in https://www.hdfgroup.org/licenses. *
- * If you do not have access to either file, you may request a copy from *
- * help@hdfgroup.org. *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-/*
- Test HDF Heap routines.
-
- REMARKS
-
- DESIGN
-
- BUGS/LIMITATIONS
-
- EXPORTED ROUTINES
-
- AUTHOR
- Quincey Koziol
-
- MODIFICATION HISTORY
- 2/18/03 - Started coding
- */
-
-#include "testhdf5.h"
-#include "H5HPprivate.h"
-
-/* The number of elements in testing arrays */
-#define NUM_ELEMS 1000
-
-/* Objects for testing in heaps */
-typedef struct test_obj {
- H5HP_info_t heap_info; /* Information required for heap. _MUST_ be first */
- int val; /* Actual information for object */
-} test_obj;
-
-/* Array of random element values */
-static test_obj *rand_num;
-
-/* Array of random elements values, sorted in increasing order */
-static test_obj *inc_sort_num;
-
-/* Array of random elements values, sorted in decreasing order */
-static test_obj *dec_sort_num;
-
-static int
-tst_dec_sort(const void *_i1, const void *_i2)
-{
- const test_obj *i1 = (const test_obj *)_i1;
- const test_obj *i2 = (const test_obj *)_i2;
-
- if (i1->val < i2->val)
- return (1);
- else if (i1->val > i2->val)
- return (-1);
- return (0);
-}
-
-static int
-tst_inc_sort(const void *_i1, const void *_i2)
-{
- const test_obj *i1 = (const test_obj *)_i1;
- const test_obj *i2 = (const test_obj *)_i2;
-
- if (i1->val < i2->val)
- return (-1);
- else if (i1->val > i2->val)
- return (1);
- return (0);
-}
-
-/****************************************************************
-**
-** test_heap_init(): Test H5HP (heap) code.
-** Initialize data for Heap testing
-**
-****************************************************************/
-static void
-test_heap_init(void)
-{
- time_t curr_time; /* Current time, for seeding random number generator */
- size_t u; /* Local index variables */
-
- /* Allocate arrays */
- rand_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
- CHECK_PTR(rand_num, "HDmalloc");
- inc_sort_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
- CHECK_PTR(inc_sort_num, "HDmalloc");
- dec_sort_num = (test_obj *)HDmalloc(sizeof(test_obj) * NUM_ELEMS);
- CHECK_PTR(dec_sort_num, "HDmalloc");
-
- /* Create randomized set of numbers */
- curr_time = HDtime(NULL);
- HDsrandom((unsigned)curr_time);
- for (u = 0; u < NUM_ELEMS; u++)
- /* Generate random numbers from -1000 to 1000 */
- rand_num[u].val = (int)(HDrandom() % 2001) - 1001;
-
- /* Sort random numbers into increasing order */
- HDmemcpy(inc_sort_num, rand_num, sizeof(test_obj) * NUM_ELEMS);
- HDqsort(inc_sort_num, (size_t)NUM_ELEMS, sizeof(test_obj), tst_inc_sort);
-
- /* Sort random numbers into decreasing order */
- HDmemcpy(dec_sort_num, rand_num, sizeof(test_obj) * NUM_ELEMS);
- HDqsort(dec_sort_num, (size_t)NUM_ELEMS, sizeof(test_obj), tst_dec_sort);
-} /* end test_heap_init() */
-
-/****************************************************************
-**
-** test_heap_create(): Test basic H5HP (heap) code.
-** Tests creating and closing heaps.
-**
-****************************************************************/
-static void
-test_heap_create(void)
-{
- H5HP_t *heap; /* Heap created */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Creating & Closing Heaps\n"));
-
- /* Try creating a maximum Heap */
- heap = H5HP_create(H5HP_MAX_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Try closing the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
- /* Try creating a minimum Heap */
- heap = H5HP_create(H5HP_MIN_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Try closing the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_create() */
-
-/****************************************************************
-**
-** test_heap_insert_min(): Test H5HP (heap) code.
-** Tests basic inserting objects into minimum heaps.
-**
-****************************************************************/
-static void
-test_heap_insert_min(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Inserting Into Minimum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MIN_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has one element */
- num = H5HP_count(heap);
- VERIFY(num, 1, "H5HP_count");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 10, "H5HP_top");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has two elements */
- num = H5HP_count(heap);
- VERIFY(num, 2, "H5HP_count");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 5, "H5HP_top");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has three elements */
- num = H5HP_count(heap);
- VERIFY(num, 3, "H5HP_count");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 5, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_insert_min() */
-
-/****************************************************************
-**
-** test_heap_insert(): Test H5HP (heap) code.
-** Tests basic inserting objects into maximum heaps.
-**
-****************************************************************/
-static void
-test_heap_insert_max(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Inserting Into Maximum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MAX_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has one element */
- num = H5HP_count(heap);
- VERIFY(num, 1, "H5HP_count");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 10, "H5HP_top");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has two elements */
- num = H5HP_count(heap);
- VERIFY(num, 2, "H5HP_count");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 10, "H5HP_top");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Check that the heap has three elements */
- num = H5HP_count(heap);
- VERIFY(num, 3, "H5HP_count");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 20, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_insert_max() */
-
-/****************************************************************
-**
-** test_heap_insert(): Test H5HP (heap) code.
-** Tests basic inserting objects into heaps.
-**
-****************************************************************/
-static void
-test_heap_insert(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Inserting Into Heaps\n"));
-
- /* Test insertions into minimum & maximum heaps */
- test_heap_insert_max();
- test_heap_insert_min();
-} /* end test_heap_insert() */
-
-/****************************************************************
-**
-** test_heap_insert_many_core (): Tests H5HP (heap) code.
-** "Core" routine called by test_heap_insert_many() routine.
-**
-****************************************************************/
-static void
-test_heap_insert_many_core(H5HP_type_t heap_type, test_obj *arr, size_t nelem, int top_val)
-{
- H5HP_t *heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- size_t u; /* Local index variable */
- herr_t ret; /* Generic return value */
-
- /* Create a Heap */
- heap = H5HP_create(heap_type);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert the array elements into the heap */
- for (u = 0; u < nelem; u++) {
- ret = H5HP_insert(heap, arr[u].val, &arr[u]);
- CHECK(ret, FAIL, "H5HP_insert");
- } /* end for */
-
- /* Check that the heap has correct number of elements */
- num = H5HP_count(heap);
- CHECK(num, FAIL, "H5HP_count");
- VERIFY((size_t)num, nelem, "H5HP_count");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, top_val, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-} /* end test_heap_insert_many_core() */
-
-/****************************************************************
-**
-** test_heap_insert_many (): Test H5HP (heap) code.
-** Tests inserting many objects into heaps.
-**
-****************************************************************/
-static void
-test_heap_insert_many(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Inserting Many Objects Into Heaps\n"));
-
- /* Test creating a heap from random elements */
- test_heap_insert_many_core(H5HP_MAX_HEAP, rand_num, (size_t)NUM_ELEMS, dec_sort_num[0].val);
-
- /* Test creating a heap from elements in increasing order */
- test_heap_insert_many_core(H5HP_MAX_HEAP, inc_sort_num, (size_t)NUM_ELEMS, dec_sort_num[0].val);
-
- /* Test creating a heap from elements in decreasing order */
- test_heap_insert_many_core(H5HP_MAX_HEAP, dec_sort_num, (size_t)NUM_ELEMS, dec_sort_num[0].val);
-
- /* Test creating a heap from random elements */
- test_heap_insert_many_core(H5HP_MIN_HEAP, rand_num, (size_t)NUM_ELEMS, inc_sort_num[0].val);
-
- /* Test creating a heap from elements in increasing order */
- test_heap_insert_many_core(H5HP_MIN_HEAP, inc_sort_num, (size_t)NUM_ELEMS, inc_sort_num[0].val);
-
- /* Test creating a heap from elements in decreasing order */
- test_heap_insert_many_core(H5HP_MIN_HEAP, dec_sort_num, (size_t)NUM_ELEMS, inc_sort_num[0].val);
-
-} /* end test_heap_insert_many() */
-
-/****************************************************************
-**
-** test_heap_remove_min(): Test H5HP (heap) code.
-** Tests basic removal of objects from minimum heaps.
-**
-****************************************************************/
-static void
-test_heap_remove_min(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- void * ptr; /* Pointer for object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Removing From Minimum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MIN_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Try removing an object from an empty heap */
- ret = H5HP_remove(heap, &val, &ptr);
- VERIFY(ret, FAIL, "H5HP_remove");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Remove first maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 5, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj2, "H5HP_remove");
-
- /* Remove second maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 10, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj1, "H5HP_remove");
-
- /* Remove third maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 20, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj3, "H5HP_remove");
-
- /* Try removing an object from an empty heap */
- ret = H5HP_remove(heap, &val, &ptr);
- VERIFY(ret, FAIL, "H5HP_remove");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_remove_min() */
-
-/****************************************************************
-**
-** test_heap_remove_max(): Test H5HP (heap) code.
-** Tests basic removal of objects from maximum heaps.
-**
-****************************************************************/
-static void
-test_heap_remove_max(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- void * ptr; /* Pointer for object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Removing From Maximum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MAX_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Try removing an object from an empty heap */
- ret = H5HP_remove(heap, &val, &ptr);
- VERIFY(ret, FAIL, "H5HP_remove");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Remove first maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 20, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj3, "H5HP_remove");
-
- /* Remove second maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 10, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj1, "H5HP_remove");
-
- /* Remove third maximum value from heap */
- ret = H5HP_remove(heap, &val, &ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, 5, "H5HP_remove");
- CHECK_PTR_EQ(ptr, &obj2, "H5HP_remove");
-
- /* Try removing an object from an empty heap */
- ret = H5HP_remove(heap, &val, &ptr);
- VERIFY(ret, FAIL, "H5HP_remove");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_remove_max() */
-
-/****************************************************************
-**
-** test_heap_remove(): Test H5HP (heap) code.
-** Tests basic removal of objects from minimum & maximum heaps.
-**
-****************************************************************/
-static void
-test_heap_remove(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Removing From Heaps\n"));
-
- /* Test removals from minimum & maximum heaps */
- test_heap_remove_max();
- test_heap_remove_min();
-} /* end test_heap_remove() */
-
-/****************************************************************
-**
-** test_heap_remove_many_core (): Tests H5HP (heap) code.
-** "Core" routine called by test_heap_remove_many() routine.
-**
-****************************************************************/
-static void
-test_heap_remove_many_core(H5HP_type_t heap_type, test_obj *arr, size_t nelem)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int last_val; /* Last value from the heap */
- int val; /* Value of object on heap */
- test_obj *ptr; /* Pointer for object on heap */
- size_t u; /* Local index variable */
- herr_t ret; /* Generic return value */
-
- /* Create a Heap */
- heap = H5HP_create(heap_type);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert the array elements into the heap */
- for (u = 0; u < nelem; u++) {
- ret = H5HP_insert(heap, arr[u].val, &arr[u]);
- CHECK(ret, FAIL, "H5HP_insert");
- } /* end for */
-
- /* Check that the heap has correct number of elements */
- num = H5HP_count(heap);
- CHECK(num, FAIL, "H5HP_count");
- VERIFY((size_t)num, nelem, "H5HP_count");
-
- /* Set an appropriate starting value for the "last" value from heap */
- if (heap_type == H5HP_MAX_HEAP)
- last_val = INT_MAX;
- else
- last_val = INT_MIN;
-
- /* Remove the objects from the heap */
- for (u = 0; u < nelem; u++) {
- ret = H5HP_remove(heap, &val, (void **)&ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, ptr->val, "H5HP_remove");
-
- /* Check that the value is correct, based on the heap type */
- if (heap_type == H5HP_MAX_HEAP) {
- if (val > last_val)
- TestErrPrintf("Error on line %d: incorrect value from heap=%d, last_val=%d\n", __LINE__, val,
- last_val);
- } /* end if */
- else {
- if (val < last_val)
- TestErrPrintf("Error on line %d: incorrect value from heap=%d, last_val=%d\n", __LINE__, val,
- last_val);
- } /* end else */
-
- /* Update last value */
- last_val = val;
- } /* end for */
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert & remove again, to check that completely empty heaps can be added again */
-
- /* Set an appropriate starting value for the "last" value from heap */
- if (heap_type == H5HP_MAX_HEAP)
- last_val = INT_MAX;
- else
- last_val = INT_MIN;
-
- /* Insert the array elements into the heap */
- for (u = 0; u < nelem; u++) {
- ret = H5HP_insert(heap, arr[u].val, &arr[u]);
- CHECK(ret, FAIL, "H5HP_insert");
- } /* end for */
-
- /* Check that the heap has correct number of elements */
- num = H5HP_count(heap);
- CHECK(num, FAIL, "H5HP_count");
- VERIFY((size_t)num, nelem, "H5HP_count");
-
- /* Remove the objects from the heap */
- for (u = 0; u < nelem; u++) {
- ret = H5HP_remove(heap, &val, (void **)&ptr);
- CHECK(ret, FAIL, "H5HP_remove");
- VERIFY(val, ptr->val, "H5HP_remove");
-
- /* Check that the value is correct, based on the heap type */
- if (heap_type == H5HP_MAX_HEAP) {
- if (val > last_val)
- TestErrPrintf("Error on line %d: incorrect value from heap=%d, last_val=%d\n", __LINE__, val,
- last_val);
- } /* end if */
- else {
- if (val < last_val)
- TestErrPrintf("Error on line %d: incorrect value from heap=%d, last_val=%d\n", __LINE__, val,
- last_val);
- } /* end else */
-
- /* Update last value */
- last_val = val;
- } /* end for */
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-} /* end test_heap_remove_many_core() */
-
-/****************************************************************
-**
-** test_heap_remove_many (): Test H5HP (heap) code.
-** Tests removing many objects into heaps.
-**
-****************************************************************/
-static void
-test_heap_remove_many(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Removing Many Objects From Heaps\n"));
-
- /* Test removing objects from maximum heap with random elements */
- test_heap_remove_many_core(H5HP_MAX_HEAP, rand_num, (size_t)NUM_ELEMS);
-
- /* Test removing objects from maximum heap with elements already sorted in increasing order */
- test_heap_remove_many_core(H5HP_MAX_HEAP, inc_sort_num, (size_t)NUM_ELEMS);
-
- /* Test removing objects from maximum heap with elements already sorted in decreasing order */
- test_heap_remove_many_core(H5HP_MAX_HEAP, dec_sort_num, (size_t)NUM_ELEMS);
-
- /* Test removing objects from minimum heap with random elements */
- test_heap_remove_many_core(H5HP_MIN_HEAP, rand_num, (size_t)NUM_ELEMS);
-
- /* Test removing objects from minimum heap with elements already sorted in increasing order */
- test_heap_remove_many_core(H5HP_MIN_HEAP, inc_sort_num, (size_t)NUM_ELEMS);
-
- /* Test removing objects from minimum heap with elements already sorted in decreasing order */
- test_heap_remove_many_core(H5HP_MIN_HEAP, dec_sort_num, (size_t)NUM_ELEMS);
-
-} /* end test_heap_remove_many() */
-
-/****************************************************************
-**
-** test_heap_change_min (): Test H5HP (heap) code.
-** Tests changing the priority of an object in a minimum heap
-**
-****************************************************************/
-static void
-test_heap_change_min(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Changing Priority of Objects in Minimum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MIN_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Change priority of first object on heap in way which shouldn't affect heap order */
- ret = H5HP_change(heap, 11, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 5, "H5HP_top");
-
- /* Change priority of first object on heap to be the top object on the heap */
- ret = H5HP_change(heap, 3, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 3, "H5HP_top");
-
- /* Change priority of first object on heap to not be the top object on the heap */
- ret = H5HP_change(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 5, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_change_min() */
-
-/****************************************************************
-**
-** test_heap_change_max (): Test H5HP (heap) code.
-** Tests changing the priority of an object in a maximumheap
-**
-****************************************************************/
-static void
-test_heap_change_max(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Changing Priority of Objects in Maximum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MAX_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Change priority of first object on heap in way which shouldn't affect heap order */
- ret = H5HP_change(heap, 11, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 20, "H5HP_top");
-
- /* Change priority of first object on heap to be the top object on the heap */
- ret = H5HP_change(heap, 21, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 21, "H5HP_top");
-
- /* Change priority of first object on heap to not be the top object on the heap */
- ret = H5HP_change(heap, 10, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 20, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_change() */
-
-/****************************************************************
-**
-** test_heap_change (): Test H5HP (heap) code.
-** Tests changing the priority of an object in maximum & minimum heaps
-**
-****************************************************************/
-static void
-test_heap_change(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Changing Priority of Objects in Heaps\n"));
-
- /* Test removals from minimum & maximum heaps */
- test_heap_change_max();
- test_heap_change_min();
-} /* end test_heap_change() */
-
-/****************************************************************
-**
-** test_heap_incdec_min (): Test H5HP (heap) code.
-** Tests incrementing & decrementing priority of objects on
-** a minimum heap.
-**
-****************************************************************/
-static void
-test_heap_incdec_min(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Incrementing & Decrementing Priority of Objects in Minimum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MIN_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 6, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Decrement object one's priority by two to put it on top of the heap */
- ret = H5HP_decr(heap, 2, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 4, "H5HP_top");
-
- /* Decrement object two's priority by two to put it back on top of the heap */
- ret = H5HP_decr(heap, 2, &obj2);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 3, "H5HP_top");
-
- /* Increment object two's priority by two to return object one to the top */
- ret = H5HP_incr(heap, 2, &obj2);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the minimum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 4, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_incdec_min() */
-
-/****************************************************************
-**
-** test_heap_incdec_max (): Test H5HP (heap) code.
-** Tests incrementing & decrementing priority of objects on
-** a maximum heap.
-**
-****************************************************************/
-static void
-test_heap_incdec_max(void)
-{
- H5HP_t * heap; /* Heap created */
- ssize_t num; /* Number of elements in heap */
- int val; /* Value of object on heap */
- test_obj obj1, obj2, obj3; /* Test objects to insert */
- herr_t ret; /* Generic return value */
-
- /* Output message about test being performed */
- MESSAGE(7, ("Testing Incrementing & Decrementing Priority of Objects in Maximum Heaps\n"));
-
- /* Create a Heap */
- heap = H5HP_create(H5HP_MAX_HEAP);
- CHECK_PTR(heap, "H5HP_create");
-
- /* Check that the heap has no elements */
- num = H5HP_count(heap);
- VERIFY(num, 0, "H5HP_count");
-
- /* Insert an object into the heap */
- obj1.val = 100;
- ret = H5HP_insert(heap, 19, &obj1);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert another object into the heap, with value less than top element */
- obj2.val = 50;
- ret = H5HP_insert(heap, 5, &obj2);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Insert third object into the heap, with value greater than top element */
- obj3.val = 200;
- ret = H5HP_insert(heap, 20, &obj3);
- CHECK(ret, FAIL, "H5HP_insert");
-
- /* Increment object one's priority by two to put it on top of the heap */
- ret = H5HP_incr(heap, 2, &obj1);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 21, "H5HP_top");
-
- /* Increment object three's priority by two to put it back on top of the heap */
- ret = H5HP_incr(heap, 2, &obj3);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 22, "H5HP_top");
-
- /* Decrement object three's priority by two to return object one to the top */
- ret = H5HP_decr(heap, 2, &obj3);
- CHECK(ret, FAIL, "H5HP_change");
-
- /* Check the maximum value on the heap */
- ret = H5HP_top(heap, &val);
- CHECK(ret, FAIL, "H5HP_top");
- VERIFY(val, 21, "H5HP_top");
-
- /* Close the heap */
- ret = H5HP_close(heap);
- CHECK(ret, FAIL, "H5HP_close");
-
-} /* end test_heap_incdec_max() */
-
-/****************************************************************
-**
-** test_heap_incdec (): Test H5HP (heap) code.
-** Tests incrementing & decrementing priority of objects on
-** maximum & minimum heaps.
-**
-****************************************************************/
-static void
-test_heap_incdec(void)
-{
- /* Output message about test being performed */
- MESSAGE(6, ("Testing Incrementing & Decrementing Priority of Objects in Heaps\n"));
-
- /* Test increments & decrements in minimum & maximum heaps */
- test_heap_incdec_max();
- test_heap_incdec_min();
-} /* end test_heap_incdec() */
-
-/****************************************************************
-**
-** test_heap_term(): Test H5HP (heap) code.
-** Release data for Heap testing
-**
-****************************************************************/
-static void
-test_heap_term(void)
-{
- /* Release arrays */
- if (rand_num)
- HDfree(rand_num);
- if (inc_sort_num)
- HDfree(inc_sort_num);
- if (dec_sort_num)
- HDfree(dec_sort_num);
-} /* end test_heap_term() */
-
-/****************************************************************
-**
-** test_heap(): Main H5HP testing routine.
-**
-****************************************************************/
-void
-test_heap(void)
-{
- /* Output message about test being performed */
- MESSAGE(5, ("Testing Heaps\n"));
-
- /* Initialize Heap testing data */
- test_heap_init();
-
- /* Actual Heap tests */
- test_heap_create(); /* Test Heap creation */
- test_heap_insert(); /* Test basic Heap insertion */
- test_heap_insert_many(); /* Test Heap insertion of many items */
- test_heap_remove(); /* Test basic Heap removal */
- test_heap_remove_many(); /* Test Heap removal of many items */
- test_heap_change(); /* Test changing priority of objects on Heap */
- test_heap_incdec(); /* Test incrementing & decrementing priority of objects on Heap */
-
- /* Release Heap testing data */
- test_heap_term();
-
-} /* end test_heap() */
diff --git a/test/tmisc.c b/test/tmisc.c
index a28e12e..f8bf602 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -1083,7 +1083,7 @@ test_misc6(void)
/* Loop through adding attributes to each dataset */
for (u = 0; u < MISC6_NUMATTR; u++) {
/* Create name for attribute */
- HDsprintf(attr_name, "Attr#%u", u);
+ HDsnprintf(attr_name, sizeof(attr_name), "Attr#%u", u);
/* Open the file */
loc_id = H5Fopen(MISC6_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
@@ -3034,7 +3034,7 @@ test_misc18(void)
/* Loop creating attributes on each dataset, flushing them to the file each time */
for (u = 0; u < 10; u++) {
/* Set up attribute name */
- HDsprintf(attr_name, "Attr %u", u);
+ HDsnprintf(attr_name, sizeof(attr_name), "Attr %u", u);
/* Create & close attribute on first dataset */
aid = H5Acreate2(did1, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT);
@@ -5504,7 +5504,7 @@ test_misc30(void)
CHECK(ret, FAIL, "test_misc30_get_info");
}
- HDsprintf(gname, "/g0/group%d", i);
+ HDsnprintf(gname, sizeof(gname), "/g0/group%d", i);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK(gid, FAIL, "H5Gcreate2");
diff --git a/test/trefstr.c b/test/trefstr.c
index d0575ab..89e62db 100644
--- a/test/trefstr.c
+++ b/test/trefstr.c
@@ -309,7 +309,7 @@ test_refstr_asprintf_cat(void)
/* Get pointer to raw string in ref-counted string */
s = H5RS_get_str(rs);
CHECK_PTR(s, "H5RS_get_str");
- HDsprintf(buf, "%d-%s", (int)10, "foo");
+ HDsnprintf(buf, sizeof(buf), "%d-%s", (int)10, "foo");
cmp = HDstrcmp(s, buf);
VERIFY(cmp, 0, "HDstrcmp");
@@ -320,7 +320,7 @@ test_refstr_asprintf_cat(void)
/* Get pointer to raw string in ref-counted string */
s = H5RS_get_str(rs);
CHECK_PTR(s, "H5RS_get_str");
- HDsprintf(buf, "%d-%s-%f", (int)10, "foo", (double)20.0);
+ HDsnprintf(buf, sizeof(buf), "%d-%s-%f", (int)10, "foo", (double)20.0);
cmp = HDstrcmp(s, buf);
VERIFY(cmp, 0, "HDstrcmp");
@@ -360,7 +360,7 @@ test_refstr_acat(void)
/* Get pointer to raw string in ref-counted string */
s = H5RS_get_str(rs);
CHECK_PTR(s, "H5RS_get_str");
- HDsprintf(buf, "%s", "foo");
+ HDsnprintf(buf, sizeof(buf), "%s", "foo");
cmp = HDstrcmp(s, buf);
VERIFY(cmp, 0, "HDstrcmp");
@@ -371,7 +371,7 @@ test_refstr_acat(void)
/* Get pointer to raw string in ref-counted string */
s = H5RS_get_str(rs);
CHECK_PTR(s, "H5RS_get_str");
- HDsprintf(buf, "%s", "foobar");
+ HDsnprintf(buf, sizeof(buf), "%s", "foobar");
cmp = HDstrcmp(s, buf);
VERIFY(cmp, 0, "HDstrcmp");
@@ -386,7 +386,7 @@ test_refstr_acat(void)
/* Get pointer to raw string in ref-counted string */
s = H5RS_get_str(rs);
CHECK_PTR(s, "H5RS_get_str");
- HDsprintf(buf, "%s", "foobar");
+ HDsnprintf(buf, sizeof(buf), "%s", "foobar");
large_str2 = HDmalloc(1024 + 6);
CHECK_PTR(large_str2, "HDmalloc");
HDstrcpy(large_str2, "foobar");
diff --git a/test/tselect.c b/test/tselect.c
index d9b625b..0d4176b 100644
--- a/test/tselect.c
+++ b/test/tselect.c
@@ -10862,13 +10862,15 @@ test_shape_same_dr__full_space_vs_slice(int test_num, int small_rank, int large_
HDassert(edge_size > 0);
HDassert(edge_size <= 1000);
- HDsprintf(test_desc_0, "\tn-cube slice through m-cube (n <= m) test %d.\n", test_num);
+ HDsnprintf(test_desc_0, sizeof(test_desc_0), "\tn-cube slice through m-cube (n <= m) test %d.\n",
+ test_num);
MESSAGE(7, (test_desc_0));
/* This statement must be updated if SS_DR_MAX_RANK is changed */
- HDsprintf(test_desc_1, "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n", small_rank,
- large_rank, offset, (int)dim_selected[0], (int)dim_selected[1], (int)dim_selected[2],
- (int)dim_selected[3], (int)dim_selected[4]);
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n", small_rank, large_rank, offset,
+ (int)dim_selected[0], (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3],
+ (int)dim_selected[4]);
MESSAGE(7, (test_desc_1));
/* copy the edge size into the dims array */
@@ -11120,15 +11122,16 @@ test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, i
HDassert(dims_selected >= 0);
HDassert(dims_selected <= large_rank);
- HDsprintf(test_desc_0, "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n", test_num);
+ HDsnprintf(test_desc_0, sizeof(test_desc_0),
+ "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n", test_num);
MESSAGE(7, (test_desc_0));
/* This statement must be updated if SS_DR_MAX_RANK is changed */
- HDsprintf(test_desc_1,
- "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n",
- small_rank, large_rank, (int)edge_size, (int)checker_size, offset, (int)dim_selected[0],
- (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
- dims_selected);
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n",
+ small_rank, large_rank, (int)edge_size, (int)checker_size, offset, (int)dim_selected[0],
+ (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
+ dims_selected);
MESSAGE(7, (test_desc_1));
/* copy the edge size into the dims array */
@@ -11664,15 +11667,16 @@ test_shape_same_dr__irregular(int test_num, int small_rank, int large_rank, int
HDassert(dims_selected >= 0);
HDassert(dims_selected <= large_rank);
- HDsprintf(test_desc_0, "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n",
- test_num);
+ HDsnprintf(test_desc_0, sizeof(test_desc_0),
+ "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n", test_num);
MESSAGE(7, (test_desc_0));
/* This statement must be updated if SS_DR_MAX_RANK is changed */
- HDsprintf(test_desc_1, "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n",
- small_rank, large_rank, edge_size, slice_offset, pattern_offset, (int)dim_selected[0],
- (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
- dims_selected);
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n", small_rank,
+ large_rank, edge_size, slice_offset, pattern_offset, (int)dim_selected[0],
+ (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
+ dims_selected);
MESSAGE(7, (test_desc_1));
/* copy the edge size into the dims array */
diff --git a/test/tvlstr.c b/test/tvlstr.c
index 68f6124..5168d39 100644
--- a/test/tvlstr.c
+++ b/test/tvlstr.c
@@ -817,33 +817,33 @@ test_vl_rewrite(void)
/* Create in file 1 */
for (i = 0; i < REWRITE_NDATASETS; i++) {
- HDsprintf(name, "/set_%d", i);
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
write_scalar_dset(file1, type, space, name, name);
}
/* Effectively copy data from file 1 to 2 */
for (i = 0; i < REWRITE_NDATASETS; i++) {
- HDsprintf(name, "/set_%d", i);
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file1, type, space, name, name);
write_scalar_dset(file2, type, space, name, name);
}
/* Read back from file 2 */
for (i = 0; i < REWRITE_NDATASETS; i++) {
- HDsprintf(name, "/set_%d", i);
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file2, type, space, name, name);
} /* end for */
/* Remove from file 2. */
for (i = 0; i < REWRITE_NDATASETS; i++) {
- HDsprintf(name, "/set_%d", i);
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
ret = H5Ldelete(file2, name, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Ldelete");
} /* end for */
/* Effectively copy from file 1 to file 2 */
for (i = 0; i < REWRITE_NDATASETS; i++) {
- HDsprintf(name, "/set_%d", i);
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
read_scalar_dset(file1, type, space, name, name);
write_scalar_dset(file2, type, space, name, name);
} /* end for */
diff --git a/test/twriteorder.c b/test/twriteorder.c
index 497542a..c58b030 100644
--- a/test/twriteorder.c
+++ b/test/twriteorder.c
@@ -63,7 +63,7 @@
/* This test uses many POSIX things that are not available on
* Windows.
*/
-#ifdef H5_HAVE_UNISTD_H
+#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)
#define DATAFILE "twriteorder.dat"
/* #define READERS_MAX 10 */ /* max number of readers */
@@ -466,7 +466,7 @@ done:
return ret_value;
}
-#else /* H5_HAVE_UNISTD_H */
+#else /* defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID) */
int
main(void)
@@ -475,4 +475,4 @@ main(void)
return EXIT_SUCCESS;
} /* end main() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID) */
diff --git a/test/unregister.c b/test/unregister.c
index 803f8373..ebb51b1 100644
--- a/test/unregister.c
+++ b/test/unregister.c
@@ -136,7 +136,7 @@ test_unregister_filters(hid_t fapl_id)
/* Create multiple groups under the main group */
for (i = 0; i < GROUP_ITERATION; i++) {
- HDsprintf(group_name, "group_%d", i);
+ HDsnprintf(group_name, sizeof(group_name), "group_%d", i);
if ((gid_loop = H5Gcreate2(gid, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid_loop) < 0)
diff --git a/test/use_append_chunk.c b/test/use_append_chunk.c
index 107615a..185cb26 100644
--- a/test/use_append_chunk.c
+++ b/test/use_append_chunk.c
@@ -63,7 +63,7 @@
/* This test uses many POSIX things that are not available on
* Windows.
*/
-#ifdef H5_HAVE_UNISTD_H
+#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)
#include "use.h"
@@ -270,7 +270,7 @@ done:
return (ret_value);
}
-#else /* H5_HAVE_UNISTD_H */
+#else /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
int
main(void)
@@ -279,4 +279,4 @@ main(void)
return EXIT_SUCCESS;
} /* end main() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
diff --git a/test/use_append_mchunks.c b/test/use_append_mchunks.c
index 47c9f92..60f63c0 100644
--- a/test/use_append_mchunks.c
+++ b/test/use_append_mchunks.c
@@ -55,7 +55,7 @@
/* This test uses many POSIX things that are not available on
* Windows.
*/
-#ifdef H5_HAVE_UNISTD_H
+#if defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID)
#include "use.h"
@@ -265,7 +265,7 @@ done:
return (ret_value);
} /* end main() */
-#else /* H5_HAVE_UNISTD_H */
+#else /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
int
main(void)
@@ -274,4 +274,4 @@ main(void)
return EXIT_SUCCESS;
} /* end main() */
-#endif /* H5_HAVE_UNISTD_H */
+#endif /* defined(H5_HAVE_FORK) && defined(H5_HAVE_WAITPID) */
diff --git a/test/vds_env.c b/test/vds_env.c
index 17c3876..e01f2e0 100644
--- a/test/vds_env.c
+++ b/test/vds_env.c
@@ -346,8 +346,9 @@ main(void)
/* Display testing info */
low_string = h5_get_version_string(low);
high_string = h5_get_version_string(high);
- HDsprintf(msg, "Testing virtual dataset with file version bounds: (%s, %s):", low_string,
- high_string);
+ HDsnprintf(msg, sizeof(msg),
+ "Testing virtual dataset with file version bounds: (%s, %s):", low_string,
+ high_string);
HDputs(msg);
for (bit_config = 0; bit_config < TEST_IO_NTESTS; bit_config++) {
diff --git a/test/vfd.c b/test/vfd.c
index 4f28766..1b83228 100644
--- a/test/vfd.c
+++ b/test/vfd.c
@@ -86,6 +86,60 @@ static int __k;
HDprintf((__k % 4 == 0) ? " %02X" : " %02X", (unsigned char)(buf)[__k]); \
} /* end #define HEXPRINT() */
+/* Macro SET_SIZE()
+ *
+ * Helper macro to track the sizes of entries in a vector
+ * I/O call when stepping through the vector incrementally.
+ * Assuming that bool_size_fixed is initialized to FALSE
+ * before the scan, this macro will detect the sizes array
+ * optimization for the case in which all remaining entries
+ * are of the same size, and set size_value accordingly.
+ *
+ * JRM -- 3/11/21
+ */
+#define SET_SIZE(bool_size_fixed, sizes_array, size_value, idx) \
+ do { \
+ if (!(bool_size_fixed)) { \
+ \
+ if ((sizes_array)[idx] == 0) { \
+ \
+ HDassert((idx) > 0); \
+ (bool_size_fixed) = TRUE; \
+ } \
+ else { \
+ \
+ (size_value) = (sizes_array)[idx]; \
+ } \
+ } \
+ } while (FALSE)
+
+/* Macro SET_TYPE()
+ *
+ * Helper macro to track the types of entries in a vector
+ * I/O call when stepping through the vector incrementally.
+ * Assuming that bool_type_fixed is initialized to FALSE
+ * before the scan, this macro will detect the types array
+ * optimization for the case in which all remaining entries
+ * are of the same type, and set type_value accordingly.
+ *
+ * JRM -- 3/11/21
+ */
+#define SET_TYPE(bool_type_fixed, types_array, type_value, idx) \
+ do { \
+ if (!(bool_type_fixed)) { \
+ \
+ if ((types_array)[idx] == H5FD_MEM_NOLIST) { \
+ \
+ HDassert((idx) > 0); \
+ (bool_type_fixed) = TRUE; \
+ } \
+ else { \
+ \
+ (type_value) = (types_array)[idx]; \
+ } \
+ } \
+ } while (FALSE)
+
/* Helper structure to pass around dataset information.
*/
struct splitter_dataset_def {
@@ -3420,6 +3474,60 @@ error:
#undef SPLITTER_TEST_FAULT
+/*****************************************************************************
+ *
+ * Function setup_rand()
+ *
+ * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
+ * seed to stdout, and then pass it to srand().
+ *
+ * This is a version of the same routine in
+ * testpar/t_cache.c modified for use in serial tests.
+ *
+ * Return: void.
+ *
+ * Programmer: JRM -- 6/20/20
+ *
+ *****************************************************************************/
+static void
+setup_rand(void)
+{
+ hbool_t use_predefined_seed = FALSE;
+ unsigned predefined_seed = 18669;
+ unsigned seed;
+ struct timeval tv;
+
+ if (use_predefined_seed) {
+
+ seed = predefined_seed;
+
+ HDfprintf(stdout, "\n%s: predefined_seed = %d.\n\n", __func__, seed);
+ HDfflush(stdout);
+
+ HDsrand(seed);
+ }
+ else {
+
+ if (HDgettimeofday(&tv, NULL) != 0) {
+
+ HDfprintf(stdout, "\n%s: gettimeofday() failed -- srand() not called.\n\n", __func__);
+ HDfflush(stdout);
+ }
+ else {
+
+ seed = (unsigned)tv.tv_usec;
+
+ HDfprintf(stdout, "\n%s: seed = %d.\n\n", __func__, seed);
+ HDfflush(stdout);
+
+ HDsrand(seed);
+ }
+ }
+
+ return;
+
+} /* setup_rand() */
+
/*
* Callback implementations for ctl feature testing VFD
*/
@@ -3488,6 +3596,7 @@ H5FD__ctl_test_vfd_ctl(H5FD_t H5_ATTR_UNUSED *_file, uint64_t op_code, uint64_t
/* Minimal VFD for ctl feature tests */
static const H5FD_class_t H5FD_ctl_test_vfd_g = {
+ H5FD_CLASS_VERSION, /* struct version */
(H5FD_class_value_t)201, /* value */
"ctl_test_vfd", /* name */
HADDR_MAX, /* maxaddr */
@@ -3516,6 +3625,10 @@ static const H5FD_class_t H5FD_ctl_test_vfd_g = {
NULL, /* get_handle */
H5FD__ctl_test_vfd_read, /* read */
H5FD__ctl_test_vfd_write, /* write */
+ NULL, /* read_vector */
+ NULL, /* write_vector */
+ NULL, /* read_selection */
+ NULL, /* write_selection */
NULL, /* flush */
NULL, /* truncate */
NULL, /* lock */
@@ -3915,6 +4028,1922 @@ error:
}
/*-------------------------------------------------------------------------
+ * Function: test_vector_io__setup_v
+ *
+ * Purpose: Construct and initialize a vector of I/O requests used
+ * to test vector I/O. Note that while the vectors are
+ * allocated and initialized, they are not assigned
+ * base addresses.
+ *
+ * All arrays parameters are presumed to be of length
+ * count.
+ *
+ * Return: Return TRUE if successful, and FALSE if any errors
+ * are encountered.
+ *
+ * Programmer: John Mainzer
+ * 6/21/20
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+test_vector_io__setup_v(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *write_bufs[], void *read_bufs[], char base_fill_char)
+{
+ hbool_t result = TRUE; /* will set to FALSE on failure */
+ char fill_char = base_fill_char;
+ uint32_t i;
+ uint32_t j;
+ H5FD_mem_t mem_types[6] = {H5FD_MEM_SUPER, H5FD_MEM_BTREE, H5FD_MEM_DRAW,
+ H5FD_MEM_GHEAP, H5FD_MEM_LHEAP, H5FD_MEM_OHDR};
+
+ /* set the arrays of pointers to the write and read buffers to NULL,
+ * so that we can release memory on failure.
+ */
+ for (i = 0; i < count; i++) {
+
+ write_bufs[i] = NULL;
+ read_bufs[i] = NULL;
+ }
+
+ for (i = 0; i < count; i++) {
+
+ types[i] = mem_types[i % 6];
+
+ addrs[i] = HADDR_UNDEF;
+
+ sizes[i] = (size_t)((rand() & 1023) + 1);
+
+ write_bufs[i] = HDmalloc(sizes[i] + 1);
+ read_bufs[i] = HDmalloc(sizes[i] + 1);
+
+ if ((NULL == write_bufs[i]) || (NULL == read_bufs[i])) {
+
+ HDfprintf(stderr, "%s: can't malloc read / write bufs.\n", __func__);
+ result = FALSE;
+ break;
+ }
+
+ for (j = 0; j < sizes[i]; j++) {
+
+ ((char *)(write_bufs[i]))[j] = fill_char;
+ ((char *)(read_bufs[i]))[j] = '\0';
+ }
+
+ ((char *)(write_bufs[i]))[sizes[i]] = '\0';
+ ((char *)(read_bufs[i]))[sizes[i]] = '\0';
+
+ fill_char++;
+ }
+
+ if (!result) { /* free buffers */
+
+ for (i = 0; i < count; i++) {
+
+ if (write_bufs[i]) {
+
+ HDfree(write_bufs[i]);
+ write_bufs[i] = NULL;
+ }
+
+ if (read_bufs[i]) {
+
+ HDfree(read_bufs[i]);
+ read_bufs[i] = NULL;
+ }
+ }
+ }
+
+ return (result);
+
+} /* end test_vector_io__setup_v() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_vector_io__setup_fixed_size_v
+ *
+ * Purpose: To test the optimization allowing short sizes and types
+ * arrays, construct and initialize a vector of I/O requests
+ * with each request of the same size and type, and use the
+ * optimizatin to allow reduced length sizes and types
+ * vectors. Since the function is supplied with types and
+ * sizes vectors of length count, simulate shorter vectors
+ * by initializing the sizes and types vectors to values
+ * that will cause failure if used.
+ *
+ * All arrays parameters are presumed to be of length
+ * count. Count is presumed to be a power of 2, and at
+ * least 2.
+ *
+ * Return: Return TRUE if successful, and FALSE if any errors
+ * are encountered.
+ *
+ * Programmer: John Mainzer
+ * 3/10/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+test_vector_io__setup_fixed_size_v(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *write_bufs[], void *read_bufs[], char base_fill_char)
+{
+ hbool_t result = TRUE; /* will set to FALSE on failure */
+ char fill_char = base_fill_char;
+ uint32_t fix_point;
+ uint32_t i;
+ uint32_t j;
+ uint32_t k;
+ H5FD_mem_t mem_types[6] = {H5FD_MEM_SUPER, H5FD_MEM_BTREE, H5FD_MEM_DRAW,
+ H5FD_MEM_GHEAP, H5FD_MEM_LHEAP, H5FD_MEM_OHDR};
+
+ /* set the arrays of pointers to the write and read buffers to NULL,
+ * so that we can release memory on failure.
+ *
+ * Set the types[] and sizes[] arrays to invalid / improbable values
+ * so that use of these values will trigger failures.
+ */
+ for (i = 0; i < count; i++) {
+
+ write_bufs[i] = NULL;
+ read_bufs[i] = NULL;
+ types[i] = H5FD_MEM_NTYPES;
+ sizes[i] = SIZE_MAX;
+ }
+
+ /* randomly select the point in the vector after which all entries are
+ * fixed at the same size and type. Observe that 0 <= fix_point <
+ * count / 2.
+ */
+ fix_point = ((uint32_t)rand() & (count - 1)) / 2;
+
+ HDassert(fix_point < count / 2);
+
+ for (i = 0; i < count; i++) {
+
+ if (i <= fix_point) {
+
+ types[i] = mem_types[i % 6];
+
+ addrs[i] = HADDR_UNDEF;
+
+ sizes[i] = (size_t)((rand() & 1023) + 1);
+
+ write_bufs[i] = HDmalloc(sizes[i] + 1);
+ read_bufs[i] = HDmalloc(sizes[i] + 1);
+ }
+ else {
+
+ if (i == fix_point + 1) {
+
+ /* set the sentinels that indicate that all remaining
+ * types and sizes are the same as the previous value.
+ */
+ types[i] = H5FD_MEM_NOLIST;
+ sizes[i] = 0;
+ }
+
+ addrs[i] = HADDR_UNDEF;
+
+ write_bufs[i] = HDmalloc(sizes[fix_point] + 1);
+ read_bufs[i] = HDmalloc(sizes[fix_point] + 1);
+ }
+
+ if ((NULL == write_bufs[i]) || (NULL == read_bufs[i])) {
+
+ HDfprintf(stderr, "%s: can't malloc read / write bufs.\n", __func__);
+ result = FALSE;
+ break;
+ }
+
+ /* need to avoid examining sizes beyond the fix_point */
+ k = MIN(i, fix_point);
+
+ for (j = 0; j < sizes[k]; j++) {
+
+ ((char *)(write_bufs[i]))[j] = fill_char;
+ ((char *)(read_bufs[i]))[j] = '\0';
+ }
+
+ ((char *)(write_bufs[i]))[sizes[k]] = '\0';
+ ((char *)(read_bufs[i]))[sizes[k]] = '\0';
+
+ fill_char++;
+ }
+
+ if (!result) { /* free buffers */
+
+ for (i = 0; i < count; i++) {
+
+ if (write_bufs[i]) {
+
+ HDfree(write_bufs[i]);
+ write_bufs[i] = NULL;
+ }
+
+ if (read_bufs[i]) {
+
+ HDfree(read_bufs[i]);
+ read_bufs[i] = NULL;
+ }
+ }
+ }
+
+ return (result);
+
+} /* end test_vector_io__setup_fixed_size_v() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_vector_io__read_v_indiv
+ *
+ * Purpose: Read the supplied vector as a sequence of individual
+ * reads.
+ *
+ * All arrays parameters are presumed to be of length
+ * count.
+ *
+ * Return: Return TRUE if successful, and FALSE if any errors
+ * are encountered.
+ *
+ * Programmer: John Mainzer
+ * 6/21/20
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+test_vector_io__read_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *read_bufs[])
+{
+ hbool_t size_fixed = FALSE;
+ hbool_t type_fixed = FALSE;
+ hbool_t result = TRUE; /* will set to FALSE on failure */
+ hbool_t verbose = FALSE;
+ uint32_t i;
+ size_t size = SIZE_MAX;
+ H5FD_mem_t type = H5FD_MEM_NTYPES;
+
+ for (i = 0; i < count; i++) {
+
+ SET_SIZE(size_fixed, sizes, size, i);
+
+ SET_TYPE(type_fixed, types, type, i);
+
+ if (H5FDread(lf, type, H5P_DEFAULT, addrs[i], size, read_bufs[i]) < 0) {
+
+ if (verbose) {
+
+ HDfprintf(stdout, "%s: H5FDread() failed on entry %d.\n", __func__, i);
+ }
+ result = FALSE;
+ break;
+ }
+ }
+
+ return (result);
+
+} /* end test_vector_io__read_v_indiv() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_vector_io__write_v_indiv
+ *
+ * Purpose: Write the supplied vector as a sequence of individual
+ * writes.
+ *
+ * All arrays parameters are presumed to be of length
+ * count.
+ *
+ * Return: Return TRUE if successful, and FALSE if any errors
+ * are encountered.
+ *
+ * Programmer: John Mainzer
+ * 6/21/20
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+test_vector_io__write_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *write_bufs[])
+{
+ hbool_t size_fixed = FALSE;
+ hbool_t type_fixed = FALSE;
+ hbool_t result = TRUE; /* will set to FALSE on failure */
+ hbool_t verbose = FALSE;
+ uint32_t i;
+ size_t size = SIZE_MAX;
+ H5FD_mem_t type = H5FD_MEM_NTYPES;
+
+ for (i = 0; i < count; i++) {
+
+ SET_SIZE(size_fixed, sizes, size, i);
+
+ SET_TYPE(type_fixed, types, type, i);
+
+ if (H5FDwrite(lf, type, H5P_DEFAULT, addrs[i], size, write_bufs[i]) < 0) {
+
+ if (verbose) {
+
+ HDfprintf(stdout, "%s: HDwrite() failed on entry %d.\n", __func__, i);
+ }
+ result = FALSE;
+ break;
+ }
+ }
+
+ return (result);
+
+} /* end test_vector_io__write_v_indiv() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: test_vector_io__verify_v
+ *
+ * Purpose: Verify that the read and write buffers of the supplied
+ * vectors are identical.
+ *
+ * Return: TRUE if the read and write vectors are identical, and
+ * FALSE otherwise.
+ *
+ * Programmer: John Mainzer
+ * 6/21/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+test_vector_io__verify_v(uint32_t count, H5FD_mem_t types[], size_t sizes[], void *write_bufs[],
+ void *read_bufs[], const char *name)
+{
+ hbool_t size_fixed = FALSE;
+ hbool_t type_fixed = FALSE;
+ hbool_t identical = TRUE;
+ hbool_t verbose = TRUE;
+ uint32_t i;
+ size_t j;
+ char * w_buf;
+ char * r_buf;
+ const char *mem_type_names[7] = {"H5FD_MEM_DEFAULT", "H5FD_MEM_SUPER", "H5FD_MEM_BTREE", "H5FD_MEM_DRAW",
+ "H5FD_MEM_GHEAP", "H5FD_MEM_LHEAP", "H5FD_MEM_OHDR"};
+ size_t size = SIZE_MAX;
+ H5FD_mem_t type = H5FD_MEM_NTYPES;
+
+ i = 0;
+
+ while ((i < count) && (identical)) {
+
+ SET_SIZE(size_fixed, sizes, size, i);
+
+ SET_TYPE(type_fixed, types, type, i);
+
+ w_buf = (char *)(write_bufs[i]);
+ r_buf = (char *)(read_bufs[i]);
+
+ j = 0;
+ while ((j < size) && (identical)) {
+
+ if (w_buf[j] != r_buf[j]) {
+
+ identical = FALSE;
+
+ if (verbose) {
+
+ HDfprintf(stdout, "\n\nread/write buf mismatch in vector/entry");
+ HDfprintf(stdout, "\"%s\"/%u at offset %llu/%llu w/r = %c/%c type = %s\n\n", name,
+ (unsigned)i, (long long unsigned)j, (long long unsigned)size, w_buf[j],
+ r_buf[j], mem_type_names[type]);
+ }
+ }
+ j++;
+ }
+ i++;
+ }
+
+ return (identical);
+
+} /* end test_vector_io__verify_v() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: test_vector_io__dump_test_vectors
+ *
+ * Purpose: Print a set of test vectors to stdout.
+ * Vectors are assumed to be of length count, and
+ * buffers must be either NULL, or null terminate strings
+ * of char.
+ *
+ * Return: void.
+ *
+ * Programmer: John Mainzer
+ * 6/21/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+test_vector_io__dump_test_vectors(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+ void *write_bufs[], void *read_bufs[], const char *name)
+{
+ hbool_t size_fixed = FALSE;
+ hbool_t type_fixed = FALSE;
+ uint32_t i;
+ const char *mem_type_names[7] = {"H5FD_MEM_DEFAULT", "H5FD_MEM_SUPER", "H5FD_MEM_BTREE", "H5FD_MEM_DRAW",
+ "H5FD_MEM_GHEAP", "H5FD_MEM_LHEAP", "H5FD_MEM_OHDR"};
+ size_t size = SIZE_MAX;
+ H5FD_mem_t type = H5FD_MEM_NTYPES;
+
+ char *w_buf;
+ char *r_buf;
+
+ HDfprintf(stdout, "\n\nDumping test vector \"%s\" of length %d\n\n", name, count);
+
+ for (i = 0; i < count; i++) {
+
+ SET_SIZE(size_fixed, sizes, size, i);
+
+ SET_TYPE(type_fixed, types, type, i);
+
+ HDassert((H5FD_MEM_DEFAULT <= type) && (type <= H5FD_MEM_OHDR));
+
+ w_buf = (char *)(write_bufs[i]);
+
+ if (read_bufs) {
+
+ r_buf = (char *)(read_bufs[i]);
+ }
+ else {
+
+ r_buf = NULL;
+ }
+
+ HDfprintf(stdout, "%u: addr/len = %llu/%llu, type = %s, w_buf = \"%s\"\n", (unsigned)i,
+ (long long unsigned)(addrs[i]), (long long unsigned)(size), mem_type_names[type], w_buf);
+
+ if (r_buf) {
+
+ HDfprintf(stdout, " r_buf = \"%s\"\n", r_buf);
+ }
+ }
+
+ return;
+
+} /* end test_vector_io__dump_test_vectors() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_vector_io
+ *
+ * Purpose: Test I/O using the vector I/O VFD public VFD calls.
+ *
+ * Test proceeds as follows:
+ *
+ * 1) read / write vectors and verify results
+ *
+ * 2) write individual / read vector and verify results
+ *
+ * 3) write vector / read individual and verify results
+ *
+ * 4) Close and then re-open the file, verify data written
+ * above.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: John Mainzer
+ * 6/20/20
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+#define VECTOR_LEN 16
+
+static herr_t
+test_vector_io(const char *vfd_name)
+{
+ char test_title[80];
+ hbool_t size_fixed_0 = FALSE; /* whether remaining entry */
+ hbool_t size_fixed_1 = FALSE; /* sizes in vector are fixed. */
+ hbool_t size_fixed_2 = FALSE; /* */
+ hbool_t type_fixed_0 = FALSE; /* whether remaining entry */
+ hbool_t type_fixed_1 = FALSE; /* types in vector are fixed. */
+ hbool_t type_fixed_2 = FALSE; /* */
+ hbool_t verbose = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ haddr_t eoa; /* file eoa */
+ char filename[1024]; /* filename */
+ char * buf; /* tmp ptr to buf */
+ unsigned flags = 0; /* file open flags */
+ H5FD_t * lf; /* VFD struct ptr */
+ uint32_t i; /* index */
+ uint32_t j; /* index */
+ uint32_t count = VECTOR_LEN; /* length of vectors */
+ H5FD_mem_t types_0[VECTOR_LEN]; /* types vector */
+ H5FD_mem_t types_1[VECTOR_LEN]; /* types vector */
+ H5FD_mem_t types_2[VECTOR_LEN]; /* types vector */
+ H5FD_mem_t f_types_0[VECTOR_LEN]; /* fixed types vector */
+ H5FD_mem_t f_types_1[VECTOR_LEN]; /* fixed types vector */
+ H5FD_mem_t f_types_2[VECTOR_LEN]; /* fixed types vector */
+ H5FD_mem_t f_type_0 = H5FD_MEM_NTYPES; /* current type for f vector 0 */
+ H5FD_mem_t f_type_1 = H5FD_MEM_NTYPES; /* current type for f vector 1 */
+ H5FD_mem_t f_type_2 = H5FD_MEM_NTYPES; /* current type for f vector 2 */
+ haddr_t addrs_0[VECTOR_LEN]; /* addresses vector */
+ haddr_t addrs_1[VECTOR_LEN]; /* addresses vector */
+ haddr_t addrs_2[VECTOR_LEN]; /* addresses vector */
+ haddr_t f_addrs_0[VECTOR_LEN]; /* fixed addresses vector */
+ haddr_t f_addrs_1[VECTOR_LEN]; /* fixed addresses vector */
+ haddr_t f_addrs_2[VECTOR_LEN]; /* fixed addresses vector */
+ size_t sizes_0[VECTOR_LEN]; /* sizes vector */
+ size_t sizes_1[VECTOR_LEN]; /* sizes vector */
+ size_t sizes_2[VECTOR_LEN]; /* sizes vector */
+ size_t f_sizes_0[VECTOR_LEN]; /* fixed sizes vector */
+ size_t f_sizes_1[VECTOR_LEN]; /* fixed sizes vector */
+ size_t f_sizes_2[VECTOR_LEN]; /* fixed sizes vector */
+ size_t f_size_0 = 0; /* current size for f vector 0 */
+ size_t f_size_1 = 0; /* current size for f vector 1 */
+ size_t f_size_2 = 0; /* current size for f vector 2 */
+ void * write_bufs_0[VECTOR_LEN]; /* write bufs vector */
+ void * write_bufs_1[VECTOR_LEN]; /* write bufs vector */
+ void * write_bufs_2[VECTOR_LEN]; /* write bufs vector */
+ void * f_write_bufs_0[VECTOR_LEN]; /* fixed write bufs vector */
+ void * f_write_bufs_1[VECTOR_LEN]; /* fixed write bufs vector */
+ void * f_write_bufs_2[VECTOR_LEN]; /* fixed write bufs vector */
+ void * read_bufs_0[VECTOR_LEN]; /* read bufs vector */
+ void * read_bufs_1[VECTOR_LEN]; /* read bufs vector */
+ void * read_bufs_2[VECTOR_LEN]; /* read bufs vector */
+ void * f_read_bufs_0[VECTOR_LEN]; /* fixed read bufs vector */
+ void * f_read_bufs_1[VECTOR_LEN]; /* fixed read bufs vector */
+ void * f_read_bufs_2[VECTOR_LEN]; /* fixed read bufs vector */
+
+ HDsnprintf(test_title, sizeof(test_title), "vector I/O with %s VFD", vfd_name);
+
+ TESTING(test_title);
+
+ /* Set property list and file name for target driver */
+
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ if (HDstrcmp(vfd_name, "sec2") == 0) {
+
+ if (H5Pset_fapl_sec2(fapl_id) < 0)
+ TEST_ERROR;
+
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename));
+ }
+ else if (HDstrcmp(vfd_name, "stdio") == 0) {
+
+ if (H5Pset_fapl_stdio(fapl_id) < 0)
+ TEST_ERROR;
+
+ h5_fixname(FILENAME[7], fapl_id, filename, sizeof filename);
+ }
+ else {
+
+ HDfprintf(stdout, "un-supported VFD\n");
+ TEST_ERROR
+ }
+
+ /* setup the test vectors -- note that addresses are not set until
+ * we allocate space via the file driver.
+ */
+ if (!(test_vector_io__setup_v(count, types_0, addrs_0, sizes_0, write_bufs_0, read_bufs_0, 'a') &&
+ test_vector_io__setup_v(count, types_1, addrs_1, sizes_1, write_bufs_1, read_bufs_1, 'e') &&
+ test_vector_io__setup_v(count, types_2, addrs_2, sizes_2, write_bufs_2, read_bufs_2, 'A')))
+ TEST_ERROR;
+
+ if (!(test_vector_io__setup_fixed_size_v(count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0,
+ f_read_bufs_0, 'b') &&
+ test_vector_io__setup_fixed_size_v(count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1,
+ f_read_bufs_1, 'f') &&
+ test_vector_io__setup_fixed_size_v(count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2,
+ f_read_bufs_2, 'B')))
+ TEST_ERROR;
+
+ flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC;
+
+ if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF)))
+ TEST_ERROR;
+
+ /* allocate space for the data in the test vectors */
+ for (i = 0; i < count; i++) {
+
+ addrs_0[i] = H5FDalloc(lf, types_0[i], H5P_DEFAULT, (hsize_t)(sizes_0[i]));
+ addrs_1[i] = H5FDalloc(lf, types_1[i], H5P_DEFAULT, (hsize_t)(sizes_1[i]));
+ addrs_2[i] = H5FDalloc(lf, types_2[i], H5P_DEFAULT, (hsize_t)(sizes_2[i]));
+
+ if ((addrs_0[i] == HADDR_UNDEF) || (addrs_1[i] == HADDR_UNDEF) || (addrs_2[i] == HADDR_UNDEF))
+ TEST_ERROR;
+
+ SET_SIZE(size_fixed_0, f_sizes_0, f_size_0, i);
+ SET_SIZE(size_fixed_1, f_sizes_1, f_size_1, i);
+ SET_SIZE(size_fixed_2, f_sizes_2, f_size_2, i);
+
+ SET_TYPE(type_fixed_0, f_types_0, f_type_0, i);
+ SET_TYPE(type_fixed_1, f_types_1, f_type_1, i);
+ SET_TYPE(type_fixed_2, f_types_2, f_type_2, i);
+
+ f_addrs_0[i] = H5FDalloc(lf, f_type_0, H5P_DEFAULT, (hsize_t)(f_size_0));
+ f_addrs_1[i] = H5FDalloc(lf, f_type_1, H5P_DEFAULT, (hsize_t)(f_size_1));
+ f_addrs_2[i] = H5FDalloc(lf, f_type_2, H5P_DEFAULT, (hsize_t)(f_size_2));
+
+ if ((f_addrs_0[i] == HADDR_UNDEF) || (f_addrs_1[i] == HADDR_UNDEF) || (f_addrs_2[i] == HADDR_UNDEF))
+ TEST_ERROR;
+ }
+
+ if (verbose) {
+
+ test_vector_io__dump_test_vectors(count, types_0, addrs_0, sizes_0, write_bufs_0, NULL, "zero");
+
+ test_vector_io__dump_test_vectors(count, types_1, addrs_1, sizes_1, write_bufs_1, NULL, "one");
+
+ test_vector_io__dump_test_vectors(count, types_2, addrs_2, sizes_2, write_bufs_2, NULL, "two");
+
+ test_vector_io__dump_test_vectors(count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0, NULL,
+ "fixed zero");
+
+ test_vector_io__dump_test_vectors(count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1, NULL,
+ "fixed one");
+
+ test_vector_io__dump_test_vectors(count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2, NULL,
+ "fixed two");
+ }
+
+ /* write and then read using vector I/O. First, read/write vector
+ * of length 1, then of length 2, then remainder of vector
+ */
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, 1, &(types_0[0]), &(addrs_0[0]), &(sizes_0[0]),
+ &(write_bufs_0[0])) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, 1, &(types_0[0]), &(addrs_0[0]), &(sizes_0[0]), &(read_bufs_0[0])) <
+ 0)
+ TEST_ERROR;
+
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, 2, &(types_0[1]), &(addrs_0[1]), &(sizes_0[1]),
+ &(write_bufs_0[1])) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, 2, &(types_0[1]), &(addrs_0[1]), &(sizes_0[1]), &(read_bufs_0[1])) <
+ 0)
+ TEST_ERROR;
+
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, count - 3, &(types_0[3]), &(addrs_0[3]), &(sizes_0[3]),
+ &(write_bufs_0[3])) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count - 3, &(types_0[3]), &(addrs_0[3]), &(sizes_0[3]),
+ &(read_bufs_0[3])) < 0)
+ TEST_ERROR;
+
+ /* for fixed size / type vector, just write and read as single operations */
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_read_bufs_0) < 0)
+ TEST_ERROR;
+
+ /* verify that the expected data is read */
+ if (!test_vector_io__verify_v(count, types_0, sizes_0, write_bufs_0, read_bufs_0, "zero"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_0, f_sizes_0, f_write_bufs_0, f_read_bufs_0, "fixed zero"))
+ TEST_ERROR;
+
+ /* write the contents of a vector individually, and then read it back
+ * in several vector reads.
+ */
+ if (!test_vector_io__write_v_indiv(lf, count, types_1, addrs_1, sizes_1, write_bufs_1))
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, 1, &(types_1[0]), &(addrs_1[0]), &(sizes_1[0]), &(read_bufs_1[0])) <
+ 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, 2, &(types_1[1]), &(addrs_1[1]), &(sizes_1[1]), &(read_bufs_1[1])) <
+ 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count - 3, &(types_1[3]), &(addrs_1[3]), &(sizes_1[3]),
+ &(read_bufs_1[3])) < 0)
+ TEST_ERROR;
+
+ /* for fixed size, write individually, and the read back in a single call */
+ if (!test_vector_io__write_v_indiv(lf, count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1))
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_1, f_addrs_1, f_sizes_1, f_read_bufs_1) < 0)
+ TEST_ERROR;
+
+ /* verify that the expected data is read */
+ if (!test_vector_io__verify_v(count, types_1, sizes_1, write_bufs_1, read_bufs_1, "one"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_1, f_sizes_1, f_write_bufs_1, f_read_bufs_1, "fixed one"))
+ TEST_ERROR;
+
+ /* Write the contents of a vector as several vector writes, then
+ * read it back in individual reads.
+ */
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, 1, &(types_2[0]), &(addrs_2[0]), &(sizes_2[0]),
+ &(write_bufs_2[0])) < 0)
+ TEST_ERROR;
+
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, 2, &(types_2[1]), &(addrs_2[1]), &(sizes_2[1]),
+ &(write_bufs_2[1])) < 0)
+ TEST_ERROR;
+
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, count - 3, &(types_2[3]), &(addrs_2[3]), &(sizes_2[3]),
+ &(write_bufs_2[3])) < 0)
+ TEST_ERROR;
+
+ if (!test_vector_io__read_v_indiv(lf, count, types_2, addrs_2, sizes_2, read_bufs_2))
+ TEST_ERROR;
+
+ /* for fixed size, write as a single vector, read back individually */
+ if (H5FDwrite_vector(lf, H5P_DEFAULT, count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2) < 0)
+ TEST_ERROR;
+
+ if (!test_vector_io__read_v_indiv(lf, count, f_types_2, f_addrs_2, f_sizes_2, f_read_bufs_2))
+ TEST_ERROR;
+
+ /* verify that the expected data is read */
+ if (!test_vector_io__verify_v(count, types_2, sizes_2, write_bufs_2, read_bufs_2, "two"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_2, f_sizes_2, f_write_bufs_2, f_read_bufs_2, "fixed two"))
+ TEST_ERROR;
+
+ /* make note of eoa -- needed after we re-open the file */
+ if (HADDR_UNDEF == (eoa = H5FDget_eoa(lf, H5FD_MEM_DEFAULT)))
+ TEST_ERROR;
+
+ /* close the file and then re-open it */
+ if (H5FDclose(lf) < 0)
+ TEST_ERROR;
+
+ flags = H5F_ACC_RDWR;
+
+ if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF)))
+ TEST_ERROR;
+
+ /* The EOA is set to 0 on open. To avoid errors, we must set it
+ * to its correct value before we do any reads.
+ *
+ * Note: In the context of using the VFD layer without the HDF5
+ * library on top, this doesn't make much sense. Consider
+ * adding an open flag that sets the EOA to the current file
+ * size.
+ */
+ if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0)
+ TEST_ERROR;
+
+ /* Null the read vectors */
+
+ size_fixed_0 = FALSE;
+ size_fixed_1 = FALSE;
+ size_fixed_2 = FALSE;
+
+ for (i = 0; i < count; i++) {
+
+ buf = read_bufs_0[i];
+ for (j = 0; j < sizes_0[i]; j++) {
+ buf[j] = '\0';
+ }
+
+ buf = read_bufs_1[i];
+ for (j = 0; j < sizes_1[i]; j++) {
+ buf[j] = '\0';
+ }
+
+ buf = read_bufs_2[i];
+ for (j = 0; j < sizes_2[i]; j++) {
+ buf[j] = '\0';
+ }
+
+ SET_SIZE(size_fixed_0, f_sizes_0, f_size_0, i);
+ SET_SIZE(size_fixed_1, f_sizes_1, f_size_1, i);
+ SET_SIZE(size_fixed_2, f_sizes_2, f_size_2, i);
+
+ buf = f_read_bufs_0[i];
+ for (j = 0; j < f_size_0; j++) {
+ buf[j] = '\0';
+ }
+
+ buf = f_read_bufs_1[i];
+ for (j = 0; j < f_size_1; j++) {
+ buf[j] = '\0';
+ }
+
+ buf = f_read_bufs_2[i];
+ for (j = 0; j < f_size_2; j++) {
+ buf[j] = '\0';
+ }
+ }
+
+ /* read the contents of the file */
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, types_0, addrs_0, sizes_0, read_bufs_0) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, types_1, addrs_1, sizes_1, read_bufs_1) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, types_2, addrs_2, sizes_2, read_bufs_2) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_read_bufs_0) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_1, f_addrs_1, f_sizes_1, f_read_bufs_1) < 0)
+ TEST_ERROR;
+
+ if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_2, f_addrs_2, f_sizes_2, f_read_bufs_2) < 0)
+ TEST_ERROR;
+
+ /* verify the contents. */
+ if (!test_vector_io__verify_v(count, types_0, sizes_0, write_bufs_0, read_bufs_0, "zero-"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, types_1, sizes_1, write_bufs_1, read_bufs_1, "one-"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, types_2, sizes_2, write_bufs_2, read_bufs_2, "two-"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_0, f_sizes_0, f_write_bufs_0, f_read_bufs_0, "fixed zero-"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_1, f_sizes_1, f_write_bufs_1, f_read_bufs_1, "fixed one-"))
+ TEST_ERROR;
+
+ if (!test_vector_io__verify_v(count, f_types_2, f_sizes_2, f_write_bufs_2, f_read_bufs_2, "fixed two-"))
+ TEST_ERROR;
+
+ if (H5FDclose(lf) < 0)
+ TEST_ERROR;
+
+ h5_delete_test_file(FILENAME[0], fapl_id);
+
+ /* Close the fapl */
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ /* discard the read and write buffers */
+
+ for (i = 0; i < count; i++) {
+
+ HDfree(write_bufs_0[i]);
+ write_bufs_0[i] = NULL;
+
+ HDfree(write_bufs_1[i]);
+ write_bufs_1[i] = NULL;
+
+ HDfree(write_bufs_2[i]);
+ write_bufs_2[i] = NULL;
+
+ HDfree(read_bufs_0[i]);
+ read_bufs_0[i] = NULL;
+
+ HDfree(read_bufs_1[i]);
+ read_bufs_1[i] = NULL;
+
+ HDfree(read_bufs_2[i]);
+ read_bufs_2[i] = NULL;
+
+ HDfree(f_write_bufs_0[i]);
+ f_write_bufs_0[i] = NULL;
+
+ HDfree(f_write_bufs_1[i]);
+ f_write_bufs_1[i] = NULL;
+
+ HDfree(f_write_bufs_2[i]);
+ f_write_bufs_2[i] = NULL;
+
+ HDfree(f_read_bufs_0[i]);
+ f_read_bufs_0[i] = NULL;
+
+ HDfree(f_read_bufs_1[i]);
+ f_read_bufs_1[i] = NULL;
+
+ HDfree(f_read_bufs_2[i]);
+ f_read_bufs_2[i] = NULL;
+ }
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5FDclose(lf);
+ }
+ H5E_END_TRY;
+ return -1;
+} /* end test_vector_io() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_selection_io_write
+ *
+ * Purpose: Updates write buffers to ensure a unique value is written
+ * to each element and issues a selection write call.
+ *
+ * Return: Success: TRUE
+ * Failure: FALSE
+ *
+ * Programmer: Neil Fortner
+ * 7/1/21
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+/* Array dimensions, used for all selection I/O tests. Currently both must be
+ * even. 1-Dimensional arrays have a size of SEL_IO_DIM0 * SEL_IO_DIM1. */
+#define SEL_IO_DIM0 8
+#define SEL_IO_DIM1 10
+
+static herr_t
+test_selection_io_write(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_spaces[], hid_t file_spaces[],
+ haddr_t offsets[], size_t element_sizes[], int *wbufs[])
+{
+ int i;
+ int j;
+
+ /* Update write buffer */
+ for (i = 0; i < (int)count; i++)
+ if (wbufs[i] && (i == 0 || wbufs[i] != wbufs[i - 1]))
+ for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
+ wbufs[i][j] += 2 * SEL_IO_DIM0 * SEL_IO_DIM1;
+
+ /* Issue write call */
+ if (H5FDwrite_selection(lf, type, H5P_DEFAULT, count, mem_spaces, file_spaces, offsets, element_sizes,
+ (const void **)wbufs) < 0)
+ TEST_ERROR
+
+ return 0;
+
+error:
+ return -1;
+} /* end test_selection_io_write() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_selection_io_read_verify
+ *
+ * Purpose: Issues a selection read call and compares the result to
+ * the arrays provided in erbufs. If rbufcount is less than
+ * count the last element in erbufs will be repeated to make
+ * up the difference.
+ *
+ * NOTE(review): only two local read buffers exist, so this
+ * helper assumes rbufcount <= 2 (and count <= 2 unless the
+ * extra entries alias or are shortened) -- confirm callers
+ * never pass more.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 7/1/21
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_selection_io_read_verify(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_spaces[],
+ hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[],
+ uint32_t rbufcount, int *erbufs[], hbool_t shorten_rbufs)
+{
+ int rbuf1[SEL_IO_DIM0 * SEL_IO_DIM1];
+ int rbuf2[SEL_IO_DIM0 * SEL_IO_DIM1];
+ int *rbufs[2] = {rbuf1, rbuf2};
+ int i;
+ int j;
+
+ /* Initialize read buffer to -1 so untouched elements are detectable */
+ for (i = 0; i < (int)rbufcount; i++)
+ for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
+ rbufs[i][j] = -1;
+
+ /* Handle elements in count that are not part of rbufcount: either shorten
+ * the array with NULL entries or repeat the last real buffer */
+ for (i = (int)rbufcount; i < (int)count; i++)
+ if (shorten_rbufs)
+ rbufs[i] = NULL;
+ else
+ rbufs[i] = rbufs[rbufcount - 1];
+
+ /* Issue read call */
+ if (H5FDread_selection(lf, type, H5P_DEFAULT, count, mem_spaces, file_spaces, offsets, element_sizes,
+ (void **)rbufs) < 0)
+ TEST_ERROR
+
+ /* Verify result. The dump loops below reuse (and clobber) j, which is
+ * safe only because we jump straight to error afterwards. */
+ for (i = 0; i < (int)rbufcount; i++)
+ for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
+ if (rbufs[i][j] != erbufs[i][j]) {
+ H5_FAILED()
+ AT()
+ HDprintf("data read from file does not match expected values at mapping array location %d\n",
+ i);
+ HDprintf("expected data: \n");
+ for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++) {
+ printf("%6d", erbufs[i][j]);
+ if (!((j + 1) % SEL_IO_DIM1))
+ printf("\n");
+ }
+ HDprintf("read data: \n");
+ for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++) {
+ printf("%6d", rbufs[i][j]);
+ if (!((j + 1) % SEL_IO_DIM1))
+ printf("\n");
+ }
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+} /* end test_selection_io_read_verify() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_selection_io
+ *
+ * Purpose: Test I/O using the selection I/O VFD public VFD calls.
+ *
+ * Tests various combinations of 1D, 2D, contiguous, and
+ * strided selections with different file data types and
+ * with and without shortened arrays.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Neil Fortner
+ * 7/1/21
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_selection_io(const char *vfd_name)
+{
+ char test_title[80];
+ hid_t fapl_id = -1; /* file access property list ID */
+ char filename[1024]; /* filename */
+ unsigned flags = 0; /* file open flags */
+ H5FD_t * lf; /* VFD struct ptr */
+ int i; /* index */
+ int j; /* index */
+ int i2; /* index */
+ int j2; /* index */
+ hid_t mem_spaces[2] = {H5I_INVALID_HID, H5I_INVALID_HID}; /* memory dataspaces vector */
+ hid_t file_spaces[2] = {H5I_INVALID_HID, H5I_INVALID_HID}; /* file dataspaces vector */
+ hsize_t dims1[1] = {SEL_IO_DIM0 * SEL_IO_DIM1}; /* 1D dataspace dimensions */
+ hsize_t dims2[2] = {SEL_IO_DIM0, SEL_IO_DIM1}; /* 2D dataspace dimensions */
+ hsize_t start[2]; /* start for hyperslab */
+ hsize_t stride[2]; /* stride for hyperslab */
+ hsize_t count[2]; /* count for hyperslab */
+ hsize_t block[2]; /* block for hyperslab */
+ H5FD_mem_t type; /* file data type */
+ haddr_t addrs[2]; /* addresses vector */
+ size_t element_sizes[2] = {sizeof(int), sizeof(int)}; /* element sizes vector */
+ int wbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D write buffer */
+ int wbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D write buffer */
+ int * wbufs[2] = {wbuf1, wbuf2[0]}; /* Array of write buffers */
+ int fbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D file buffer */
+ int fbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D file buffer */
+ int * fbufs[2] = {fbuf1, fbuf2[0]}; /* Array of file buffers */
+ int erbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D expected read buffer */
+ int erbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D expected read buffer */
+ int * erbufs[2] = {erbuf1, erbuf2[0]}; /* Array of expected read buffers */
+ int shorten_element_sizes; /* Whether to shorten the element sizes array */
+
+ HDsnprintf(test_title, sizeof(test_title), "selection I/O with %s VFD", vfd_name);
+
+ TESTING(test_title);
+
+ /* Set property list and file name for target driver */
+
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+
+ if (HDstrcmp(vfd_name, "sec2") == 0) {
+
+ if (H5Pset_fapl_sec2(fapl_id) < 0)
+ TEST_ERROR
+
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename));
+ }
+ else if (HDstrcmp(vfd_name, "stdio") == 0) {
+
+ if (H5Pset_fapl_stdio(fapl_id) < 0)
+ TEST_ERROR
+
+ h5_fixname(FILENAME[7], fapl_id, filename, sizeof filename);
+ }
+ else {
+
+ HDfprintf(stdout, "un-supported VFD\n");
+ TEST_ERROR
+ }
+
+ /* Initialize write buffers with distinct values (wbuf2 offset by the full
+ * array size so the two buffers never collide) */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++) {
+ wbuf1[(i * SEL_IO_DIM1) + j] = (i * SEL_IO_DIM1) + j;
+ wbuf2[i][j] = (i * SEL_IO_DIM1) + j + (SEL_IO_DIM0 * SEL_IO_DIM1);
+ }
+
+ /* Create dataspaces - location 0 will be 1D and location 1 will be 2D */
+ if ((mem_spaces[0] = H5Screate_simple(1, dims1, NULL)) < 0)
+ TEST_ERROR
+ if ((mem_spaces[1] = H5Screate_simple(2, dims2, NULL)) < 0)
+ TEST_ERROR
+ if ((file_spaces[0] = H5Screate_simple(1, dims1, NULL)) < 0)
+ TEST_ERROR
+ if ((file_spaces[1] = H5Screate_simple(2, dims2, NULL)) < 0)
+ TEST_ERROR
+
+ /* Create file */
+ flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC;
+
+ if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF)))
+ TEST_ERROR;
+
+ /* Loop over memory types (starts at 1 to skip H5FD_MEM_DEFAULT) */
+ for (type = 1; type < H5FD_MEM_NTYPES; type++) {
+ /* Allocate space for I/O.
+ * NOTE(review): return values are not checked against HADDR_UNDEF --
+ * confirm allocation failure would be caught by the subsequent I/O. */
+ addrs[0] = H5FDalloc(lf, type, H5P_DEFAULT, (hsize_t)(sizeof(int) * SEL_IO_DIM0 * SEL_IO_DIM1));
+ addrs[1] = H5FDalloc(lf, type, H5P_DEFAULT, (hsize_t)(sizeof(int) * SEL_IO_DIM0 * SEL_IO_DIM1));
+
+ /*
+ * Test 1: Simple 1D contiguous I/O
+ */
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes,
+ (int **)&wbufs[0]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < SEL_IO_DIM0 * SEL_IO_DIM1; i++)
+ fbuf1[i] = wbuf1[i];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&fbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 2: Simple 2D contiguous I/O
+ */
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes,
+ (int **)&wbufs[1]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ fbuf2[i][j] = wbuf2[i][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&fbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 3: Strided <> Contiguous 1D I/O
+ */
+ /* SEL_IO_DIM1 must be even */
+ HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2);
+
+ /* Strided selection in memory */
+ start[0] = 1;
+ stride[0] = 2;
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Contiguous selection in file */
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes,
+ (int **)&wbufs[0]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ fbuf1[i + 1] = wbuf1[(2 * i) + 1];
+
+ /* Update expected read buf (-1 marks elements the read must not touch) */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++)
+ erbuf1[i] = -1;
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ erbuf1[(2 * i) + 1] = wbuf1[(2 * i) + 1];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&erbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&fbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 4: Contiguous <> Strided 1D I/O
+ */
+ /* SEL_IO_DIM1 must be even */
+ HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2);
+
+ /* Contiguous selection in memory */
+ start[0] = 1;
+ stride[0] = 2;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file */
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes,
+ (int **)&wbufs[0]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ fbuf1[(2 * i) + 1] = wbuf1[i + 1];
+
+ /* Update expected read buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++)
+ erbuf1[i] = -1;
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ erbuf1[i + 1] = wbuf1[i + 1];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&erbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&fbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 5: Strided <> Strided 1D I/O
+ */
+ /* SEL_IO_DIM1 must be even */
+ HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2);
+
+ /* Strided selection in memory */
+ start[0] = 1;
+ stride[0] = 2;
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file */
+ start[0] = 0;
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes,
+ (int **)&wbufs[0]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ fbuf1[2 * i] = wbuf1[(2 * i) + 1];
+
+ /* Update expected read buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++)
+ erbuf1[i] = -1;
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ erbuf1[(2 * i) + 1] = wbuf1[(2 * i) + 1];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&erbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&fbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 6: Strided <> Contiguous 2D I/O
+ */
+ /* Strided selection in memory */
+ start[0] = 1;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 1;
+ count[0] = SEL_IO_DIM0 / 2;
+ count[1] = SEL_IO_DIM1;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Contiguous selection in file */
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes,
+ (int **)&wbufs[1]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < SEL_IO_DIM0 / 2; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ fbuf2[i + 1][j] = wbuf2[(2 * i) + 1][j];
+
+ /* Update expected read buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 0; i < SEL_IO_DIM0 / 2; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[(2 * i) + 1][j] = wbuf2[(2 * i) + 1][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&erbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&fbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 7: Contiguous <> Strided 2D I/O
+ */
+ /* Contiguous selection in memory */
+ start[0] = 0;
+ start[1] = 1;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file */
+ stride[0] = 1;
+ stride[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes,
+ (int **)&wbufs[1]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1 / 2; j++)
+ fbuf2[i][(2 * j) + 1] = wbuf2[i][j + 1];
+
+ /* Update expected read buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1 / 2; j++)
+ erbuf2[i][j + 1] = wbuf2[i][j + 1];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&erbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&fbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 8: Strided <> Strided 2D I/O
+ */
+ /* SEL_IO_DIM0 and SEL_IO_DIM1 must be even */
+ HDassert(SEL_IO_DIM0 / 2 == (SEL_IO_DIM0 + 1) / 2);
+ HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2);
+
+ /* Strided selection (across dim 1) in memory */
+ start[0] = 0;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 0) in file */
+ start[0] = 1;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 1;
+ count[0] = SEL_IO_DIM0 / 2;
+ count[1] = SEL_IO_DIM1;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes,
+ (int **)&wbufs[1]) < 0)
+ TEST_ERROR
+
+ /* Update file buf - (i2,j2) walks the file-side strided selection while
+ * (i,j) walks the memory-side selection */
+ for (i = 0, i2 = 1, j2 = 0; i < SEL_IO_DIM0; i++)
+ for (j = 1; j < SEL_IO_DIM1; j += 2) {
+ HDassert(i2 < SEL_IO_DIM0);
+ fbuf2[i2][j2] = wbuf2[i][j];
+ if (++j2 == SEL_IO_DIM1) {
+ i2 += 2;
+ j2 = 0;
+ }
+ }
+
+ /* Update expected read buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 1; j < SEL_IO_DIM1; j += 2)
+ erbuf2[i][j] = wbuf2[i][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&erbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&fbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 9: Strided 1D <> Strided 2D I/O
+ */
+ /* Strided selection in memory */
+ start[0] = 1;
+ stride[0] = 2;
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 1) in file */
+ start[0] = 0;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1], element_sizes,
+ (int **)&wbufs[0]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 1, i2 = 0, j2 = 1; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i += 2) {
+ HDassert(i2 < SEL_IO_DIM0);
+ fbuf2[i2][j2] = wbuf1[i];
+ j2 += 2;
+ if (j2 >= SEL_IO_DIM1) {
+ i2++;
+ j2 = 1;
+ }
+ }
+
+ /* Update expected read buf */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++)
+ erbuf1[i] = -1;
+ for (i = 1; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i += 2)
+ erbuf1[i] = wbuf1[i];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&erbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1],
+ element_sizes, 1, (int **)&fbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 10: Strided 2D <> Strided 1D I/O
+ */
+ /* Strided selection (across dim 0) in memory */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 1;
+ count[0] = SEL_IO_DIM0 / 2;
+ count[1] = SEL_IO_DIM1;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file */
+ start[0] = 0;
+ stride[0] = 2;
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0], element_sizes,
+ (int **)&wbufs[1]) < 0)
+ TEST_ERROR
+
+ /* Update file buf */
+ for (i = 0, i2 = 0; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++) {
+ HDassert(i2 < (SEL_IO_DIM0 * SEL_IO_DIM1));
+ fbuf1[i2] = wbuf2[i][j];
+ i2 += 2;
+ }
+
+ /* Update expected read buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 0; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = wbuf2[i][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&erbufs[1], FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0],
+ element_sizes, 1, (int **)&fbufs[0], FALSE) < 0)
+ TEST_ERROR
+
+ /* Run tests with full and partial element sizes array */
+ for (shorten_element_sizes = 0; shorten_element_sizes <= 1; shorten_element_sizes++) {
+ /*
+ * Test 11: Strided <> Strided 1D and 2D I/O
+ */
+ /* SEL_IO_DIM1 must be even */
+ HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2);
+
+ /* Strided selection in memory (1D) */
+ start[0] = 0;
+ stride[0] = 2;
+ count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2;
+ block[0] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file (1D) */
+ start[0] = 1;
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 0) in memory (2D) */
+ start[0] = 1;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 1;
+ count[0] = SEL_IO_DIM0 / 2;
+ count[1] = SEL_IO_DIM1;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 1) in file (2D) */
+ start[0] = 0;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes,
+ (int **)wbufs) < 0)
+ TEST_ERROR
+
+ /* Update file bufs */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ fbuf1[(2 * i) + 1] = wbuf1[2 * i];
+ for (i = 1, i2 = 0, j2 = 1; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++) {
+ HDassert(i2 < SEL_IO_DIM0);
+ fbuf2[i2][j2] = wbuf2[i][j];
+ j2 += 2;
+ if (j2 >= SEL_IO_DIM1) {
+ i2++;
+ j2 = 1;
+ }
+ }
+
+ /* Update expected read bufs */
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++)
+ erbuf1[i] = -1;
+ for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++)
+ erbuf1[2 * i] = wbuf1[2 * i];
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 1; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = wbuf2[i][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2,
+ (int **)erbufs, FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2,
+ (int **)fbufs, FALSE) < 0)
+ TEST_ERROR
+
+ /*
+ * Test 12: Strided <> Strided 2D I/O, 2 different selections in the same memory buffer
+ */
+ /* Switch mem and file spaces to both be 2D */
+ if (H5Sset_extent_simple(mem_spaces[0], 2, dims2, NULL) < 0)
+ TEST_ERROR
+ if (H5Sset_extent_simple(file_spaces[0], 2, dims2, NULL) < 0)
+ TEST_ERROR
+
+ /* Strided selection in memory (1st) */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 1;
+ count[0] = SEL_IO_DIM0 / 2;
+ count[1] = SEL_IO_DIM1;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 0) in memory (2nd) */
+ start[0] = 1;
+ if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection in file (1st) */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Strided selection (across dim 1) in file (2nd) */
+ start[0] = 0;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = SEL_IO_DIM0;
+ count[1] = SEL_IO_DIM1 / 2;
+ block[0] = 1;
+ block[1] = 1;
+ if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR
+
+ /* Use the same memory buffer for both selections */
+ wbufs[0] = wbuf2[0];
+
+ /* Shorten wbuf array */
+ if (shorten_element_sizes)
+ wbufs[1] = NULL;
+ else
+ wbufs[1] = wbufs[0];
+
+ /* Issue write call */
+ if (test_selection_io_write(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes,
+ (int **)wbufs) < 0)
+ TEST_ERROR
+
+ /* Update file bufs - need to reuse 1D array so data stays consistent, so use math to
+ * find 1D index into 2D array */
+ for (i = 0, i2 = 0, j2 = 0; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++) {
+ HDassert(i2 < SEL_IO_DIM0);
+ fbuf1[(i2 * SEL_IO_DIM1) + j2] = wbuf2[i][j];
+ j2 += 2;
+ if (j2 >= SEL_IO_DIM1) {
+ i2++;
+ j2 = 0;
+ }
+ }
+ for (i = 1, i2 = 0, j2 = 1; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++) {
+ HDassert(i2 < SEL_IO_DIM0);
+ fbuf2[i2][j2] = wbuf2[i][j];
+ j2 += 2;
+ if (j2 >= SEL_IO_DIM1) {
+ i2++;
+ j2 = 1;
+ }
+ }
+
+ /* Update expected read buf */
+ for (i = 0; i < SEL_IO_DIM0; i++)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = -1;
+ for (i = 0; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = wbuf2[i][j];
+ for (i = 1; i < SEL_IO_DIM0; i += 2)
+ for (j = 0; j < SEL_IO_DIM1; j++)
+ erbuf2[i][j] = wbuf2[i][j];
+
+ /* Read and verify */
+ if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 1,
+ (int **)&erbufs[1], shorten_element_sizes ? TRUE : FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset selections */
+ if (H5Sselect_all(mem_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[0]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(mem_spaces[1]) < 0)
+ TEST_ERROR
+ if (H5Sselect_all(file_spaces[1]) < 0)
+ TEST_ERROR
+
+ /* Read entire file buffer and verify */
+ if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2,
+ (int **)fbufs, FALSE) < 0)
+ TEST_ERROR
+
+ /* Reset first spaces to 1D */
+ if (H5Sset_extent_simple(mem_spaces[0], 1, dims1, NULL) < 0)
+ TEST_ERROR
+ if (H5Sset_extent_simple(file_spaces[0], 1, dims1, NULL) < 0)
+ TEST_ERROR
+
+ /* Reset write buffer array */
+ wbufs[0] = wbuf1;
+ wbufs[1] = wbuf2[0];
+
+ /* Change to shortened element sizes array (a 0 entry means "repeat the
+ * previous size") for the second pass of the loop */
+ element_sizes[1] = 0;
+ }
+
+ /* Reset element sizes array */
+ element_sizes[1] = element_sizes[0];
+ }
+
+ /*
+ * Cleanup
+ */
+ /* Close file */
+ if (H5FDclose(lf) < 0)
+ TEST_ERROR;
+
+ /* NOTE(review): the stdio branch above opens FILENAME[7], but cleanup
+ * always deletes FILENAME[0] -- confirm the stdio test file is removed
+ * elsewhere (e.g. by h5_cleanup in main) */
+ h5_delete_test_file(FILENAME[0], fapl_id);
+
+ /* Close the fapl */
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ /* Close dataspaces */
+ for (i = 0; i < 2; i++) {
+ if (H5Sclose(mem_spaces[i]) < 0)
+ TEST_ERROR
+ if (H5Sclose(file_spaces[i]) < 0)
+ TEST_ERROR
+ }
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5FDclose(lf);
+ for (i = 0; i < 2; i++) {
+ H5Sclose(mem_spaces[i]);
+ H5Sclose(file_spaces[i]);
+ }
+ }
+ H5E_END_TRY;
+ return -1;
+} /* end test_selection_io() */
+
+/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Tests the basic features of Virtual File Drivers
@@ -3943,6 +5972,8 @@ main(void)
HDprintf("Testing basic Virtual File Driver functionality.\n");
+ setup_rand();
+
nerrors += test_sec2() < 0 ? 1 : 0;
nerrors += test_core() < 0 ? 1 : 0;
nerrors += test_direct() < 0 ? 1 : 0;
@@ -3956,6 +5987,10 @@ main(void)
nerrors += test_windows() < 0 ? 1 : 0;
nerrors += test_ros3() < 0 ? 1 : 0;
nerrors += test_splitter() < 0 ? 1 : 0;
+ nerrors += test_vector_io("sec2") < 0 ? 1 : 0;
+ nerrors += test_vector_io("stdio") < 0 ? 1 : 0;
+ nerrors += test_selection_io("sec2") < 0 ? 1 : 0;
+ nerrors += test_selection_io("stdio") < 0 ? 1 : 0;
nerrors += test_ctl() < 0 ? 1 : 0;
if (nerrors) {
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index ff4446c..32f4a0f 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -89,6 +89,7 @@ set (H5P_TESTS
t_shapesame
t_filters_parallel
t_2Gio
+ t_vfd
)
foreach (h5_testp ${H5P_TESTS})
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 6a8cc2b..cbde0c1 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -30,7 +30,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA)
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio t_vfd
# t_pflush1 and t_pflush2 are used by testpflush.sh
check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
index 2be4ae4..79241c6 100644
--- a/testpar/t_2Gio.c
+++ b/testpar/t_2Gio.c
@@ -3047,7 +3047,7 @@ compress_readAll(void)
nerrors++;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dwrite succeeded");
#endif
@@ -3644,7 +3644,7 @@ test_actual_io_mode(int selection_mode)
/* Set the threshold number of processes per chunk to twice mpi_size.
* This will prevent the threshold from ever being met, thus forcing
* multi chunk io instead of link chunk io.
- * This is via deault.
+ * This is via default.
*/
if (multi_chunk_io) {
/* force multi-chunk-io by threshold */
@@ -3853,12 +3853,6 @@ actual_io_mode_tests(void)
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
* Test for Externl-File storage as the cause of breaking collective I/O.
*
- * TEST_FILTERS:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter
- * feature. Use test_no_collective_cause_mode_filter() function instead.
- *
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
@@ -3898,9 +3892,6 @@ test_no_collective_cause_mode(int selection_mode)
hid_t file_space = -1;
hsize_t chunk_dims[MAX_RANK];
herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
/* set to global value as default */
int l_facc_type = facc_type;
char message[256];
@@ -3932,21 +3923,6 @@ test_no_collective_cause_mode(int selection_mode)
is_chunked = 0;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
- }
-#endif /* LATER */
-
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
sid = H5Screate(H5S_NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -4022,14 +3998,6 @@ test_no_collective_cause_mode(int selection_mode)
no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected |= H5D_MPIO_FILTERS;
- no_collective_cause_global_expected |= H5D_MPIO_FILTERS;
- }
-#endif /* LATER */
-
if (selection_mode & TEST_COLLECTIVE) {
test_name = "Broken Collective I/O - Not Broken";
no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
@@ -4166,240 +4134,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-#if 0
-/*
- * Function: test_no_collective_cause_mode_filter
- *
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
- * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
- * have the correct values.
- *
- * NOTE:
- * This is a temporary function.
- * test_no_collective_cause_mode(TEST_FILTERS) will replace this when
- * H5Dcreate and H5write support for mpio and filter feature.
- *
- * Input:
- * TEST_FILTERS_READ:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- *
- * Programmer: Jonathan Kim
- * Date: Aug, 2012
- */
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
-{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
- uint32_t no_collective_cause_global_expected = 0;
-
- const char * filename;
- const char * test_name;
- hbool_t is_chunked=1;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
- hsize_t dims[MAX_RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hsize_t chunk_dims[MAX_RANK];
- herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
- char message[256];
-
- /* Set up MPI parameters */
- MPI_Comm_size(test_comm, &mpi_size);
- MPI_Comm_rank(test_comm, &mpi_rank);
-
- MPI_Barrier(test_comm);
-
- HDassert(mpi_size >= 1);
-
- mpi_comm = test_comm;
- mpi_info = MPI_INFO_NULL;
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- if (selection_mode == TEST_FILTERS_READ ) {
-#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info);
- VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0),"set filter (flecher32) succeeded");
-#endif /* LATER */
- }
- else {
- VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
- }
-
- /* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
- sid = H5Screate_simple (MAX_RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
-
- filename = (const char *)GetTestParameters();
- HDassert(filename != NULL);
-
- /* Setup the file access template */
- fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT);
- VRFY((fapl_write >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* If we are not testing contiguous datasets */
- if(is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0),"chunk creation property list succeeded");
- }
-
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
-#ifdef LATER /* fletcher32 */
- /* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
- no_collective_cause_global_expected = H5D_MPIO_FILTERS;
-#endif /* LATER */
-
- /* Get the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Create the memory dataspace */
- mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
-
- /* Get the number of elements in the selection */
- length = dim0 * dim1;
-
- /* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
- * because write fails with mpio + filter */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- else {
- /* To test write in collective I/O mode. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl = H5Pcopy(dxpl);
- VRFY((dxpl >= 0), "H5Pcopy succeeded");
-
- if (dataset)
- H5Dclose(dataset);
- if (fapl_write)
- H5Pclose(fapl_write);
- if (fid)
- H5Fclose(fid);
-
-
- /*---------------------
- * Test Read access
- *---------------------*/
-
- /* Setup the file access template */
- fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
-
- /* Set collective I/O properties in the dxpl. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
- VRFY((ret >= 0), "retrieving no collective cause succeeded" );
-
- /* Test values */
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
- VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- HDmemset (message, 0, sizeof (message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
- VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
-
- /* Release some resources */
- if (sid)
- H5Sclose(sid);
- if (fapl_read)
- H5Pclose(fapl_read);
- if (dcpl)
- H5Pclose(dcpl);
- if (dxpl)
- H5Pclose(dxpl);
- if (dataset)
- H5Dclose(dataset);
- if (mem_space)
- H5Sclose(mem_space);
- if (file_space)
- H5Sclose(file_space);
- if (fid)
- H5Fclose(fid);
- HDfree(buffer);
- return;
-}
-#endif
-
/* Function: no_collective_cause_tests
*
* Purpose: Tests cases for broken collective IO.
@@ -4420,13 +4154,6 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
- /* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
-#endif /* LATER */
/*
* Test combined causes
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 4df624b..0a971c5 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1107,13 +1107,15 @@ single_rank_independent_io(void)
HDprintf("\nSingle Rank Independent I/O\n");
if (MAIN_PROCESS) {
- hsize_t dims[] = {LARGE_DIM};
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t fspace_id = -1;
- hid_t mspace_id = -1;
- void * data = NULL;
+ hsize_t dims[] = {LARGE_DIM};
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ hid_t mspace_id = -1;
+ herr_t ret;
+ int * data = NULL;
+ uint64_t i;
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
@@ -1135,6 +1137,10 @@ single_rank_independent_io(void)
data = malloc(LARGE_DIM * sizeof(int));
+ /* Initialize data */
+ for (i = 0; i < LARGE_DIM; i++)
+ data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
+
if (mpi_rank_g == 0)
H5Sselect_all(fspace_id);
else
@@ -1143,7 +1149,24 @@ single_rank_independent_io(void)
dims[0] = LARGE_DIM;
mspace_id = H5Screate_simple(1, dims, NULL);
VRFY_G((mspace_id >= 0), "H5Screate_simple mspace_id succeeded");
+
+ /* Write data */
H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dwrite succeeded");
+
+ /* Wipe buffer */
+ HDmemset(data, 0, LARGE_DIM * sizeof(int));
+
+ /* Read data back */
+ H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data */
+ for (i = 0; i < LARGE_DIM; i++)
+ if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
free(data);
H5Sclose(mspace_id);
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 8c96756..8559afb 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -6488,7 +6488,7 @@ trace_file_check(int metadata_write_strategy)
} /* end if */
if (nerrors == 0) {
- HDsprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank);
+ HDsnprintf(trace_file_name, sizeof(trace_file_name), "t_cache_trace.txt.%d", (int)file_mpi_rank);
if ((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL) {
@@ -6623,13 +6623,15 @@ trace_file_check(int metadata_write_strategy)
static hbool_t
smoke_check_6(int metadata_write_strategy)
{
- hbool_t success = TRUE;
- int i;
- int max_nerrors;
- hid_t fid = -1;
- H5F_t * file_ptr = NULL;
- H5C_t * cache_ptr = NULL;
- struct mssg_t mssg;
+ H5P_coll_md_read_flag_t md_reads_file_flag;
+ hbool_t md_reads_context_flag;
+ hbool_t success = TRUE;
+ int i;
+ int max_nerrors;
+ hid_t fid = -1;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ struct mssg_t mssg;
switch (metadata_write_strategy) {
@@ -6685,7 +6687,9 @@ smoke_check_6(int metadata_write_strategy)
virt_num_data_entries = NUM_DATA_ENTRIES;
/* insert the first half collectively */
- H5CX_set_coll_metadata_read(TRUE);
+ md_reads_file_flag = H5P_USER_TRUE;
+ md_reads_context_flag = TRUE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < virt_num_data_entries / 2; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6704,9 +6708,13 @@ smoke_check_6(int metadata_write_strategy)
H5_CHECK_OVERFLOW(cache_ptr->max_cache_size, size_t, double);
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* insert the other half independently */
- H5CX_set_coll_metadata_read(FALSE);
+ md_reads_file_flag = H5P_USER_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6716,7 +6724,7 @@ smoke_check_6(int metadata_write_strategy)
if (FALSE != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
+ HDfprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n",
world_mpi_rank, __func__);
}
}
@@ -6724,6 +6732,8 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* flush the file */
if (H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
@@ -6734,7 +6744,9 @@ smoke_check_6(int metadata_write_strategy)
}
/* Protect the first half of the entries collectively */
- H5CX_set_coll_metadata_read(TRUE);
+ md_reads_file_flag = H5P_USER_TRUE;
+ md_reads_context_flag = TRUE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < (virt_num_data_entries / 2); i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6752,9 +6764,13 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
/* protect the other half independently */
- H5CX_set_coll_metadata_read(FALSE);
+ md_reads_file_flag = H5P_USER_FALSE;
+ md_reads_context_flag = FALSE;
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = virt_num_data_entries / 2; i < virt_num_data_entries; i++) {
struct datum *entry_ptr;
entry_ptr = &(data[i]);
@@ -6764,7 +6780,7 @@ smoke_check_6(int metadata_write_strategy)
if (FALSE != entry_ptr->header.coll_access) {
nerrors++;
if (verbose) {
- HDfprintf(stdout, "%d:%s: Entry inserted indepedently marked as collective.\n",
+ HDfprintf(stdout, "%d:%s: Entry inserted independently marked as collective.\n",
world_mpi_rank, __func__);
}
}
@@ -6772,6 +6788,8 @@ smoke_check_6(int metadata_write_strategy)
/* Make sure coll entries do not cross the 80% threshold */
HDassert((double)cache_ptr->max_cache_size * 0.8 > cache_ptr->coll_list_size);
}
+ /* Restore collective metadata reads state */
+ H5F_set_coll_metadata_reads(file_ptr, &md_reads_file_flag, &md_reads_context_flag);
for (i = 0; i < (virt_num_data_entries); i++) {
unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index feb4325..4229a77 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -477,7 +477,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE, dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
@@ -766,7 +766,7 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
while ( ( pass ) && ( i <= max_dset ) )
{
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
@@ -1334,7 +1334,7 @@ par_create_dataset(int dset_num, hid_t file_id, int mpi_rank, int mpi_size)
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1707,7 +1707,7 @@ par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank)
show_progress = (show_progress && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1840,7 +1840,7 @@ par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank)
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2208,7 +2208,7 @@ serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size)
hid_t dset_id = -1;
hid_t filespace_id = -1;
- HDsprintf(dset_name, "/dset%03d", dset_num);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", dset_num);
if (show_progress) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2460,7 +2460,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if (pass) {
- HDsprintf(dset_name, "/dset%03d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if (dataset_ids[i] < 0) {
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index 104460a..20efaa1 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -832,7 +832,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap
VRFY((status >= 0), "dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
- if (facc_type == FACC_MPIO) {
+ /* Only check chunk optimization mode if selection I/O is not being used -
+ * selection I/O bypasses this IO mode decision - it's effectively always
+ * multi chunk currently */
+ if (facc_type == FACC_MPIO && !H5_use_selection_io_g) {
switch (api_option) {
case API_LINK_HARD:
status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index 66f3151..cabdea0 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -34,10 +34,9 @@
#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
-#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
/*
* A test for issue HDFFV-10501. A parallel hang was reported which occurred
@@ -339,21 +338,34 @@ test_multi_chunk_io_addrmap_issue(void)
* collective metadata reads being made only by process 0 in H5D__sort_chunk().
*
* NOTE: Due to the way that the threshold value which pertains to this test
- * is currently calculated within HDF5, there are several conditions that this
- * test must maintain. Refer to the function H5D__sort_chunk in H5Dmpio.c for
- * a better idea of why.
+ * is currently calculated within HDF5, the following two conditions must be
+ * true to trigger the issue:
*
- * Condition 1: We need to make sure that the test always selects every single
- * chunk in the dataset. It is fine if the selection is split up among multiple
- * ranks, but their combined selection must cover the whole dataset.
+ * Condition 1: A certain threshold ratio must be met in order to have HDF5
+ * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
+ * given by the following:
*
- * Condition 2: The number of chunks in the dataset divided by the number of MPI
- * ranks must exceed or equal 10000. In other words, each MPI rank must be
- * responsible for 10000 or more unique chunks.
+ * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
*
- * Condition 3: This test will currently only be reliably reproducible for 2 or 3
- * MPI ranks. The threshold value calculated reduces to a constant 100 / mpi_size,
- * and is compared against a default value of 30%.
+ * where:
+ * * `sum_chunk` is the combined sum of the number of chunks selected in
+ * the dataset by all ranks (chunks selected by more than one rank count
+ * individually toward the sum for each rank selecting that chunk)
+ * * `dataset_nchunks` is the number of chunks in the dataset (selected
+ * or not)
+ * * `mpi_size` is the size of the MPI Communicator
+ *
+ * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
+ * threshold (as of this writing, 10000).
+ *
+ * To satisfy both these conditions, we #define a macro,
+ * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
+ * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
+ * 10000 threshold from condition 2). We then create a dataset of that many
+ * chunks and have each MPI rank write to and read from a piece of every single
+ * chunk in the dataset. This ensures chunk utilization is the max possible
+ * and exceeds our 30% target ratio, while always exactly matching the numeric
+ * chunk threshold value of condition 2.
*
* Failure in this test may either cause a hang, or, due to how the MPI calls
* pertaining to this issue might mistakenly match up, may cause an MPI error
@@ -375,10 +387,9 @@ void
test_link_chunk_io_sort_chunk_issue(void)
{
const char *filename;
- hsize_t * dataset_dims = NULL;
- hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
- hsize_t sel_dims[1];
- hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = {LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS};
+ hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
@@ -412,14 +423,13 @@ test_link_chunk_io_sort_chunk_issue(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
VRFY((file_id >= 0), "H5Fcreate succeeded");
- dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
- VRFY((dataset_dims != NULL), "malloc succeeded");
-
- dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size *
- (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
- max_dataset_dims[0] = H5S_UNLIMITED;
+ /*
+ * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM
+ * chunks, where every rank writes to a piece of every single chunk to keep utilization high.
+ */
+ dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
- fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL);
VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
/*
@@ -428,6 +438,9 @@ test_link_chunk_io_sort_chunk_issue(void)
dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+ /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */
+ chunk_dims[0] = (hsize_t)mpi_size;
+
VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
"H5Pset_chunk succeeded");
@@ -437,23 +450,21 @@ test_link_chunk_io_sort_chunk_issue(void)
/*
* Setup hyperslab selection to split the dataset among the ranks.
- *
- * The ranks will write rows across the dataset.
*/
- stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
- count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
- start[0] = count[0] * (hsize_t)mpi_rank;
- block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+ start[0] = (hsize_t)mpi_rank;
+ stride[0] = (hsize_t)mpi_size;
+ count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+ block[0] = 1;
VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
"H5Sselect_hyperslab succeeded");
- sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+ sel_dims[0] = count[0];
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- data = HDcalloc(1, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ data = HDcalloc(1, count[0] * sizeof(int));
VRFY((data != NULL), "calloc succeeded");
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
@@ -476,33 +487,25 @@ test_link_chunk_io_sort_chunk_issue(void)
VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
"H5Pset_dxpl_mpio_chunk_opt succeeded");
- read_buf = HDmalloc(count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ read_buf = HDmalloc(count[0] * sizeof(int));
VRFY((read_buf != NULL), "malloc succeeded");
VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
"H5Sselect_hyperslab succeeded");
- sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+ sel_dims[0] = count[0];
VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- read_buf = HDrealloc(read_buf, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
- VRFY((read_buf != NULL), "realloc succeeded");
-
/*
* Finally have each rank read their section of data back from the dataset.
*/
VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
"H5Dread succeeded");
- if (dataset_dims) {
- HDfree(dataset_dims);
- dataset_dims = NULL;
- }
-
if (data) {
HDfree(data);
data = NULL;
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 9f922e3..eb11b32 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2605,7 +2605,7 @@ compress_readAll(void)
nerrors++;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dwrite succeeded");
#endif
@@ -3202,7 +3202,7 @@ test_actual_io_mode(int selection_mode)
/* Set the threshold number of processes per chunk to twice mpi_size.
* This will prevent the threshold from ever being met, thus forcing
* multi chunk io instead of link chunk io.
- * This is via deault.
+ * This is via default.
*/
if (multi_chunk_io) {
/* force multi-chunk-io by threshold */
@@ -3351,32 +3351,38 @@ actual_io_mode_tests(void)
int mpi_size = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+ /* Only run these tests if selection I/O is not being used - selection I/O
+ * bypasses this IO mode decision - it's effectively always multi chunk
+ * currently */
+ if (!H5_use_selection_io_g) {
+ test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
- /*
- * Test multi-chunk-io via proc_num threshold
- */
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
- /* The Multi Chunk Mixed test requires at least three processes. */
- if (mpi_size > 2)
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
- else
- HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
+ /* The Multi Chunk Mixed test requires at least three processes. */
+ if (mpi_size > 2)
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+ else
+ HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
- test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- /*
- * Test multi-chunk-io via setting direct property
- */
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
- test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
- test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
- test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+ test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+ test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+ }
- test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
@@ -3418,12 +3424,6 @@ actual_io_mode_tests(void)
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
* Test for Externl-File storage as the cause of breaking collective I/O.
*
- * TEST_FILTERS:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter
- * feature. Use test_no_collective_cause_mode_filter() function instead.
- *
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
@@ -3465,9 +3465,6 @@ test_no_collective_cause_mode(int selection_mode)
hid_t file_space = -1;
hsize_t chunk_dims[RANK];
herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
/* set to global value as default */
int l_facc_type = facc_type;
char message[256];
@@ -3499,21 +3496,6 @@ test_no_collective_cause_mode(int selection_mode)
is_chunked = 0;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, &filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
- }
-#endif /* LATER */
-
if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
sid = H5Screate(H5S_NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3589,14 +3571,6 @@ test_no_collective_cause_mode(int selection_mode)
no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
}
-#ifdef LATER /* fletcher32 */
- if (selection_mode & TEST_FILTERS) {
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected |= H5D_MPIO_FILTERS;
- no_collective_cause_global_expected |= H5D_MPIO_FILTERS;
- }
-#endif /* LATER */
-
if (selection_mode & TEST_COLLECTIVE) {
test_name = "Broken Collective I/O - Not Broken";
no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
@@ -3699,10 +3673,12 @@ test_no_collective_cause_mode(int selection_mode)
/* Test values */
HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ HDsnprintf(message, sizeof(message),
+ "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ HDsnprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
/* Release some resources */
@@ -3733,240 +3709,6 @@ test_no_collective_cause_mode(int selection_mode)
return;
}
-/*
- * Function: test_no_collective_cause_mode_filter
- *
- * Purpose:
- * Test specific for using filter as a caus of broken collective I/O and
- * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
- * have the correct values.
- *
- * NOTE:
- * This is a temporary function.
- * test_no_collective_cause_mode(TEST_FILTERS) will replace this when
- * H5Dcreate and H5write support for mpio and filter feature.
- *
- * Input:
- * TEST_FILTERS_READ:
- * Test for using filter (checksum) as the cause of breaking collective I/O.
- *
- * Programmer: Jonathan Kim
- * Date: Aug, 2012
- */
-#ifdef LATER
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
-{
- uint32_t no_collective_cause_local_read = 0;
- uint32_t no_collective_cause_local_expected = 0;
- uint32_t no_collective_cause_global_read = 0;
- uint32_t no_collective_cause_global_expected = 0;
-
- const char *filename;
- const char *test_name = "I/O";
- hbool_t is_chunked = 1;
- int mpi_size = -1;
- int mpi_rank = -1;
- int length;
- int * buffer;
- int i;
- MPI_Comm mpi_comm = MPI_COMM_NULL;
- MPI_Info mpi_info = MPI_INFO_NULL;
- hid_t fid = -1;
- hid_t sid = -1;
- hid_t dataset = -1;
- hid_t data_type = H5T_NATIVE_INT;
- hid_t fapl_write = -1;
- hid_t fapl_read = -1;
- hid_t dcpl = -1;
- hid_t dxpl = -1;
- hsize_t dims[RANK];
- hid_t mem_space = -1;
- hid_t file_space = -1;
- hsize_t chunk_dims[RANK];
- herr_t ret;
-#ifdef LATER /* fletcher32 */
- H5Z_filter_t filter_info;
-#endif /* LATER */
- char message[256];
-
- /* Set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
- MPI_Barrier(MPI_COMM_WORLD);
-
- HDassert(mpi_size >= 1);
-
- mpi_comm = MPI_COMM_WORLD;
- mpi_info = MPI_INFO_NULL;
-
- /* Create the dataset creation plist */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((dcpl >= 0), "dataset creation plist created successfully");
-
- if (selection_mode == TEST_FILTERS_READ) {
-#ifdef LATER /* fletcher32 */
- ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
- VRFY((ret >= 0), "Fletcher32 filter is available.\n");
-
- ret = H5Zget_filter_info(H5Z_FILTER_FLETCHER32, (unsigned int *)&filter_info);
- VRFY(((filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
- (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED)),
- "Fletcher32 filter encoding and decoding available.\n");
-
- ret = H5Pset_fletcher32(dcpl);
- VRFY((ret >= 0), "set filter (flecher32) succeeded");
-#endif /* LATER */
- }
- else {
- VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
- }
-
- /* Create the basic Space */
- dims[0] = (hsize_t)dim0;
- dims[1] = (hsize_t)dim1;
- sid = H5Screate_simple(RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
-
- filename = (const char *)GetTestParameters();
- HDassert(filename != NULL);
-
- /* Setup the file access template */
- fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT);
- VRFY((fapl_write >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* If we are not testing contiguous datasets */
- if (is_chunked) {
- /* Set up chunk information. */
- chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
- chunk_dims[1] = dims[1];
- ret = H5Pset_chunk(dcpl, 2, chunk_dims);
- VRFY((ret >= 0), "chunk creation property list succeeded");
- }
-
- /* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
-
-#ifdef LATER /* fletcher32 */
- /* Set expected cause */
- test_name = "Broken Collective I/O - Filter is required";
- no_collective_cause_local_expected = H5D_MPIO_FILTERS;
- no_collective_cause_global_expected = H5D_MPIO_FILTERS;
-#endif /* LATER */
-
- /* Get the file dataspace */
- file_space = H5Dget_space(dataset);
- VRFY((file_space >= 0), "H5Dget_space succeeded");
-
- /* Create the memory dataspace */
- mem_space = H5Screate_simple(RANK, dims, NULL);
- VRFY((mem_space >= 0), "mem_space created");
-
- /* Get the number of elements in the selection */
- length = dim0 * dim1;
-
- /* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for (i = 0; i < length; i++)
- buffer[i] = i;
-
- /* Set up the dxpl for the write */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
- if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
- * because write fails with mpio + filter */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
- else {
- /* To test write in collective I/O mode. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- }
-
- /* Write */
- ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
- /* Make a copy of the dxpl to test the read operation */
- dxpl = H5Pcopy(dxpl);
- VRFY((dxpl >= 0), "H5Pcopy succeeded");
-
- if (dataset)
- H5Dclose(dataset);
- if (fapl_write)
- H5Pclose(fapl_write);
- if (fid)
- H5Fclose(fid);
-
- /*---------------------
- * Test Read access
- *---------------------*/
-
- /* Setup the file access template */
- fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
- VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
-
- fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_read);
- dataset = H5Dopen2(fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
-
- /* Set collective I/O properties in the dxpl. */
- ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Read */
- ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
- if (ret < 0)
- H5Eprint2(H5E_DEFAULT, stdout);
- VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
- /* Get the cause of broken collective I/O */
- ret = H5Pget_mpio_no_collective_cause(dxpl, &no_collective_cause_local_read,
- &no_collective_cause_global_read);
- VRFY((ret >= 0), "retrieving no collective cause succeeded");
-
- /* Test values */
- HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
- VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- HDmemset(message, 0, sizeof(message));
- HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
- VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
-
- /* Release some resources */
- if (sid)
- H5Sclose(sid);
- if (fapl_read)
- H5Pclose(fapl_read);
- if (dcpl)
- H5Pclose(dcpl);
- if (dxpl)
- H5Pclose(dxpl);
- if (dataset)
- H5Dclose(dataset);
- if (mem_space)
- H5Sclose(mem_space);
- if (file_space)
- H5Sclose(file_space);
- if (fid)
- H5Fclose(fid);
- HDfree(buffer);
- return;
-}
-#endif
-
/* Function: no_collective_cause_tests
*
* Purpose: Tests cases for broken collective IO.
@@ -3987,13 +3729,6 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
- * H5Dwrite is ready for mpio + filter feature.
- */
- /* test_no_collective_cause_mode (TEST_FILTERS); */
- test_no_collective_cause_mode_filter(TEST_FILTERS_READ);
-#endif /* LATER */
/*
* Test combined causes
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 6c6ac69..229fd6c 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -514,25 +514,25 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsprintf(dset_name, "D1dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D2dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D3dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -555,13 +555,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i = 0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
- HDsprintf(dset_name, "D1dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D2dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- HDsprintf(dset_name, "D3dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
@@ -667,7 +667,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t
VRFY((mem_dataspace >= 0), "");
for (k = 0; k < NUM_DSETS; k++) {
- HDsprintf(dset_name, "dset%d", k);
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index 78af0fb..8a55519 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -26,73 +26,139 @@
const char *FILENAME[] = {"t_filters_parallel", NULL};
char filenames[1][256];
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+static int mpi_rank;
+static int mpi_size;
+
int nerrors = 0;
-size_t cur_filter_idx = 0;
-#define GZIP_INDEX 0
-#define FLETCHER32_INDEX 1
+/* Arrays of filter ID values and filter names (should match each other) */
+H5Z_filter_t filterIDs[] = {
+ H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE, H5Z_FILTER_FLETCHER32,
+ H5Z_FILTER_SZIP, H5Z_FILTER_NBIT, H5Z_FILTER_SCALEOFFSET,
+};
+
+const char *filterNames[] = {"Deflate", "Shuffle", "Fletcher32", "SZIP", "Nbit", "ScaleOffset"};
-#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
+/* Function pointer typedef for test functions */
+typedef void (*test_func)(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id);
+
+/* Typedef for filter arguments for user-defined filters */
+typedef struct filter_options_t {
+ unsigned int flags;
+ size_t cd_nelmts;
+ const unsigned int cd_values[];
+} filter_options_t;
/*
- * Used to check if a filter is available before running a test.
+ * Enum for verify_space_alloc_status which specifies
+ * how many chunks have been written to in a dataset
*/
-#define CHECK_CUR_FILTER_AVAIL() \
- { \
- htri_t filter_is_avail; \
- \
- if (cur_filter_idx == GZIP_INDEX) { \
- if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
- if (MAINPROCESS) { \
- HDputs(" - SKIPPED - Deflate filter not available"); \
- } \
- return; \
- } \
- } \
- }
+typedef enum num_chunks_written_t {
+ DATASET_JUST_CREATED,
+ NO_CHUNKS_WRITTEN,
+ SOME_CHUNKS_WRITTEN,
+ ALL_CHUNKS_WRITTEN
+} num_chunks_written_t;
-static herr_t set_dcpl_filter(hid_t dcpl);
+static herr_t set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options);
+static herr_t verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written);
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/* Tests for writing data in parallel */
-static void test_write_one_chunk_filtered_dataset(void);
-static void test_write_filtered_dataset_no_overlap(void);
-static void test_write_filtered_dataset_overlap(void);
-static void test_write_filtered_dataset_single_no_selection(void);
-static void test_write_filtered_dataset_all_no_selection(void);
-static void test_write_filtered_dataset_point_selection(void);
-static void test_write_filtered_dataset_interleaved_write(void);
-static void test_write_transformed_filtered_dataset_no_overlap(void);
-static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void);
-static void test_write_3d_filtered_dataset_no_overlap_same_pages(void);
-static void test_write_3d_filtered_dataset_overlap(void);
-static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void);
-static void test_write_cmpd_filtered_dataset_no_conversion_shared(void);
-static void test_write_cmpd_filtered_dataset_type_conversion_unshared(void);
-static void test_write_cmpd_filtered_dataset_type_conversion_shared(void);
+static void test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_unlim_dim_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_unlim_dim_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_multi_unlim_dim_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_transformed_filtered_dataset_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_no_overlap_separate_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_no_overlap_same_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_no_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_no_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_type_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_write_cmpd_filtered_dataset_type_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
#endif
/* Tests for reading data in parallel */
-static void test_read_one_chunk_filtered_dataset(void);
-static void test_read_filtered_dataset_no_overlap(void);
-static void test_read_filtered_dataset_overlap(void);
-static void test_read_filtered_dataset_single_no_selection(void);
-static void test_read_filtered_dataset_all_no_selection(void);
-static void test_read_filtered_dataset_point_selection(void);
-static void test_read_filtered_dataset_interleaved_read(void);
-static void test_read_transformed_filtered_dataset_no_overlap(void);
-static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void);
-static void test_read_3d_filtered_dataset_no_overlap_same_pages(void);
-static void test_read_3d_filtered_dataset_overlap(void);
-static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void);
-static void test_read_cmpd_filtered_dataset_no_conversion_shared(void);
-static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void);
-static void test_read_cmpd_filtered_dataset_type_conversion_shared(void);
-
-#if MPI_VERSION >= 3
-/* Other miscellaneous tests */
-static void test_shrinking_growing_chunks(void);
-#endif
+static void test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_transformed_filtered_dataset_no_overlap(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_no_overlap_separate_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_no_overlap_same_pages(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_no_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_no_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_type_conversion_unshared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_read_cmpd_filtered_dataset_type_conversion_shared(const char * parent_group,
+ H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
/*
* Tests for attempting to round-trip the data going from
@@ -103,21 +169,40 @@ static void test_shrinking_growing_chunks(void);
*
* written in parallel -> read serially
*/
-static void test_write_serial_read_parallel(void);
-#if MPI_VERSION >= 3
-static void test_write_parallel_read_serial(void);
-#endif
+static void test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
-static MPI_Comm comm = MPI_COMM_WORLD;
-static MPI_Info info = MPI_INFO_NULL;
-static int mpi_rank;
-static int mpi_size;
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
+static void test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
-static void (*tests[])(void) = {
-#if MPI_VERSION >= 3
+/* Other miscellaneous tests */
+static void test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id);
+static void test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+static void test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id);
+#endif
+
+static test_func tests[] = {
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
test_write_one_chunk_filtered_dataset,
test_write_filtered_dataset_no_overlap,
+ test_write_filtered_dataset_no_overlap_partial,
test_write_filtered_dataset_overlap,
+ test_write_filtered_dataset_single_unlim_dim_no_overlap,
+ test_write_filtered_dataset_single_unlim_dim_overlap,
+ test_write_filtered_dataset_multi_unlim_dim_no_overlap,
+ test_write_filtered_dataset_multi_unlim_dim_overlap,
test_write_filtered_dataset_single_no_selection,
test_write_filtered_dataset_all_no_selection,
test_write_filtered_dataset_point_selection,
@@ -147,33 +232,168 @@ static void (*tests[])(void) = {
test_read_cmpd_filtered_dataset_type_conversion_unshared,
test_read_cmpd_filtered_dataset_type_conversion_shared,
test_write_serial_read_parallel,
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
test_write_parallel_read_serial,
test_shrinking_growing_chunks,
+ test_edge_chunks_no_overlap,
+ test_edge_chunks_overlap,
+ test_edge_chunks_partial_write,
+ test_fill_values,
+ test_fill_value_undefined,
+ test_fill_time_never,
#endif
};
/*
* Function to call the appropriate HDF5 filter-setting function
- * depending on the currently set index. Used to re-run the tests
+ * depending on the given filter ID. Used to re-run the tests
* with different filters to check that the data still comes back
* correctly under a variety of circumstances, such as the
* Fletcher32 checksum filter increasing the size of the chunk.
*/
static herr_t
-set_dcpl_filter(hid_t dcpl)
+set_dcpl_filter(hid_t dcpl_id, H5Z_filter_t filter_id, filter_options_t *filter_options)
+{
+ switch (filter_id) {
+ case H5Z_FILTER_DEFLATE:
+ return H5Pset_deflate(dcpl_id, DEFAULT_DEFLATE_LEVEL);
+ case H5Z_FILTER_SHUFFLE:
+ return H5Pset_shuffle(dcpl_id);
+ case H5Z_FILTER_FLETCHER32:
+ return H5Pset_fletcher32(dcpl_id);
+ case H5Z_FILTER_SZIP: {
+ unsigned pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK;
+ hsize_t chunk_dims[H5S_MAX_RANK] = {0};
+ size_t i, chunk_nelemts;
+
+ VRFY(H5Pget_chunk(dcpl_id, H5S_MAX_RANK, chunk_dims) >= 0, "H5Pget_chunk succeeded");
+
+ for (i = 0, chunk_nelemts = 1; i < H5S_MAX_RANK; i++)
+ if (chunk_dims[i] > 0)
+ chunk_nelemts *= chunk_dims[i];
+
+ if (chunk_nelemts < H5_SZIP_MAX_PIXELS_PER_BLOCK) {
+ /*
+ * Can't set SZIP for chunk of 1 data element.
+ * Pixels-per-block value must be both even
+ * and non-zero.
+ */
+ if (chunk_nelemts == 1)
+ return SUCCEED;
+
+ if ((chunk_nelemts % 2) == 0)
+ pixels_per_block = (unsigned)chunk_nelemts;
+ else
+ pixels_per_block = (unsigned)(chunk_nelemts - 1);
+ }
+ else
+ pixels_per_block = H5_SZIP_MAX_PIXELS_PER_BLOCK;
+
+ return H5Pset_szip(dcpl_id, 0, pixels_per_block);
+ }
+ case H5Z_FILTER_NBIT:
+ return H5Pset_nbit(dcpl_id);
+ case H5Z_FILTER_SCALEOFFSET:
+ return H5Pset_scaleoffset(dcpl_id, H5Z_SO_INT, 0);
+ default: {
+ if (!filter_options)
+ return FAIL;
+
+ return H5Pset_filter(dcpl_id, filter_id, filter_options->flags, filter_options->cd_nelmts,
+ filter_options->cd_values);
+ }
+ }
+}
+
+/*
+ * Function to verify the status of dataset storage space allocation
+ * based on the dataset's allocation time setting and how many chunks
+ * in the dataset have been written to.
+ */
+static herr_t
+verify_space_alloc_status(hid_t dset_id, hid_t dcpl_id, num_chunks_written_t chunks_written)
{
- switch (cur_filter_idx) {
- case GZIP_INDEX:
- return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
- case FLETCHER32_INDEX:
- return H5Pset_fletcher32(dcpl);
- default:
- return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ int nfilters;
+ herr_t ret_value = SUCCEED;
+
+ VRFY(((nfilters = H5Pget_nfilters(dcpl_id)) >= 0), "H5Pget_nfilters succeeded");
+
+ /*
+ * Only verify space allocation status when there are filters
+ * in the dataset's filter pipeline. When filters aren't in the
+ * pipeline, the space allocation time and status can vary based
+ * on whether the file was created in parallel or serial mode.
+ */
+ if (nfilters > 0) {
+ H5D_space_status_t space_status;
+ H5D_alloc_time_t alloc_time;
+
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+ VRFY((H5Dget_space_status(dset_id, &space_status) >= 0), "H5Dget_space_status succeeded");
+
+ switch (alloc_time) {
+ case H5D_ALLOC_TIME_EARLY:
+ /*
+ * Early space allocation should always result in the
+ * full dataset storage space being allocated.
+ */
+ VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status");
+ break;
+ case H5D_ALLOC_TIME_LATE:
+ /*
+ * Late space allocation should always result in the
+ * full dataset storage space being allocated when
+ * the dataset gets written to. However, if the dataset
+ * is extended the dataset's space allocation status
+ * can become partly allocated until the dataset is
+ * written to again.
+ */
+ if (chunks_written == SOME_CHUNKS_WRITTEN || chunks_written == ALL_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED) ||
+ (space_status == H5D_SPACE_STATUS_PART_ALLOCATED),
+ "verified space allocation status");
+ else if (chunks_written == NO_CHUNKS_WRITTEN)
+ /*
+ * A special case where we wrote to a dataset that
+ * uses late space allocation, but the write was
+ * either a no-op (no selection in the dataset
+ * from any rank) or something caused the write to
+ * fail late in the process of performing the actual
+ * write. In either case, space should still have
+ * been allocated.
+ */
+ VRFY(space_status == H5D_SPACE_STATUS_ALLOCATED, "verified space allocation status");
+ else
+ VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
+ break;
+ case H5D_ALLOC_TIME_DEFAULT:
+ case H5D_ALLOC_TIME_INCR:
+ /*
+ * Incremental space allocation should result in
+ * the dataset's storage space being incrementally
+ * allocated as chunks are written to. Once all chunks
+ * have been written to, the space allocation should be
+ * seen as fully allocated.
+ */
+ if (chunks_written == SOME_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_PART_ALLOCATED),
+ "verified space allocation status");
+ else if (chunks_written == ALL_CHUNKS_WRITTEN)
+ VRFY((space_status == H5D_SPACE_STATUS_ALLOCATED), "verified space allocation status");
+ else
+ VRFY(space_status == H5D_SPACE_STATUS_NOT_ALLOCATED, "verified space allocation status");
+ break;
+ default:
+ if (MAINPROCESS)
+ MESG("unknown space allocation time");
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
}
+
+ return ret_value;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/*
* Tests parallel write of filtered data in the special
* case where a dataset is composed of a single chunk.
@@ -182,7 +402,8 @@ set_dcpl_filter(hid_t dcpl)
* 02/01/2017
*/
static void
-test_write_one_chunk_filtered_dataset(void)
+test_write_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -195,26 +416,18 @@ test_write_one_chunk_filtered_dataset(void)
hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to one-chunk filtered dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
@@ -231,19 +444,21 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -293,15 +508,12 @@ test_write_one_chunk_filtered_dataset(void)
((C_DATATYPE)i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size *
WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -311,10 +523,10 @@ test_write_one_chunk_filtered_dataset(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -324,10 +536,11 @@ test_write_one_chunk_filtered_dataset(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -343,7 +556,8 @@ test_write_one_chunk_filtered_dataset(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_no_overlap(void)
+test_write_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -356,27 +570,18 @@ test_write_filtered_dataset_no_overlap(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
@@ -393,20 +598,22 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -454,15 +661,168 @@ test_write_filtered_dataset_no_overlap(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ if (data)
+ HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
+ if (correct_buf)
+ HDfree(correct_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where only
+ * one process is writing to a particular chunk in the operation
+ * and that process only writes to part of a chunk.
+ */
+static void
+test_write_filtered_dataset_no_overlap_partial(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing partial write to unshared filtered chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ sel_dims[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS /
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS);
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t)(WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS /
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS);
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS;
+ block[1] = (hsize_t)1;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t rank_n_elems = (size_t)(mpi_size * (WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS *
+ WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS));
+ size_t data_idx = i;
+
+ for (size_t j = 0; j < rank_n_elems; j++) {
+ if ((j % WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS) == 0) {
+ correct_buf[(i * rank_n_elems) + j] = (C_DATATYPE)data_idx;
+ data_idx++;
+ }
+ }
+ }
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -472,10 +832,10 @@ test_write_filtered_dataset_no_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -485,10 +845,10 @@ test_write_filtered_dataset_no_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
- VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -505,7 +865,8 @@ test_write_filtered_dataset_no_overlap(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_overlap(void)
+test_write_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -518,27 +879,18 @@ test_write_filtered_dataset_overlap(void)
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_NROWS;
@@ -555,20 +907,22 @@ test_write_filtered_dataset_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -616,15 +970,12 @@ test_write_filtered_dataset_overlap(void)
(dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -634,10 +985,10 @@ test_write_filtered_dataset_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -647,10 +998,650 @@ test_write_filtered_dataset_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has a single unlimited dimension and each
+ * MPI rank writes to its own separate chunk. On each
+ * iteration, the dataset is extended in its extensible
+ * dimension by "MPI size" chunks per rank and the new
+ * chunks are written to, read back and verified.
+ */
+static void
+test_write_filtered_dataset_single_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered chunks w/ single unlimited dimension");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS;
+ max_dims[0] = dataset_dims[0];
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] =
+ (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
+ start[1] = i * count[1] * block[1];
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify the correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by count[1] chunks in the extensible dimension */
+ dataset_dims[1] += count[1] * block[1];
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has a single unlimited dimension and each
+ * MPI rank writes to a portion of each chunk in the dataset.
+ * On each iteration, the dataset is extended in its extensible
+ * dimension by two chunks and the new chunks are written to
+ * by all ranks, then read back and verified.
+ */
+static void
+test_write_filtered_dataset_single_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks w/ single unlimited dimension");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS;
+ max_dims[0] = dataset_dims[0];
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ for (i = 0; i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS; i++) {
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NROWS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ count[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_NCOLS / (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ stride[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = i * count[1] * block[1];
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by count[1] chunks in the extensible dimension */
+ dataset_dims[1] += count[1] * block[1];
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has two unlimited dimensions and each
+ * MPI rank writes to its own separate chunks. On each
+ * iteration, the dataset is extended in its first
+ * extensible dimension by the size of one chunk per rank
+ * and in its second extensible dimension by the size of
+ * one chunk. Then, all chunks are written to, read back
+ * and verified.
+ */
+static void
+test_write_filtered_dataset_multi_unlim_dim_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+                                                       hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+    C_DATATYPE *data = NULL;     /* Write buffer; reallocated to the growing selection each iteration */
+    C_DATATYPE *read_buf = NULL; /* Read-back buffer; always kept the same size as 'data' */
+    hsize_t dataset_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t max_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t chunk_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t sel_dims[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t start[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t stride[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t count[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    hsize_t block[WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+    size_t i, data_size;
+    hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+    hid_t group_id = H5I_INVALID_HID;
+    hid_t filespace = H5I_INVALID_HID;
+
+    if (MAINPROCESS)
+        HDputs("Testing write to unshared filtered chunks w/ two unlimited dimensions");
+
+    file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+    VRFY((file_id >= 0), "Test file open succeeded");
+
+    group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+    VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+    /* Create the dataspace for the dataset: both dimensions are extensible
+     * (H5S_UNLIMITED maximum), starting at the configured NROWS x NCOLS size.
+     */
+    dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS;
+    dataset_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS;
+    max_dims[0] = H5S_UNLIMITED;
+    max_dims[1] = H5S_UNLIMITED;
+    chunk_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+    chunk_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+    sel_dims[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+    sel_dims[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS;
+
+    filespace = H5Screate_simple(WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+    VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+    /* Create chunked dataset (chunking is required for both filters and
+     * unlimited dimensions); work on a copy so the caller's DCPL is untouched.
+     */
+    plist_id = H5Pcopy(dcpl_id);
+    VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+    VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+         "Chunk size set");
+
+    /* Add test filter to the pipeline */
+    VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+    dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+                         H5P_DEFAULT, plist_id, H5P_DEFAULT);
+    VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+    /* Verify space allocation status: nothing written yet */
+    verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+    VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+    /* Write/read/verify loop; after iteration i the dataset holds (i+1) chunk
+     * rows per rank and (i+1) chunk columns, so the buffers and hyperslab
+     * selection grow each pass.
+     */
+    for (i = 0; i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+        C_DATATYPE *tmp_realloc = NULL;
+        size_t j;
+
+        /* Set selected dimensions: this rank covers (i+1) x (i+1) chunks */
+        sel_dims[0] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+        sel_dims[1] = (i + 1) * WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+
+        /* Fill data buffer */
+        data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+        /* Grow via a temporary so the old pointer is not lost if HDrealloc fails */
+        tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size);
+        VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+        data = tmp_realloc;
+
+        tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size);
+        VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+        read_buf = tmp_realloc;
+
+        for (j = 0; j < data_size / sizeof(*data); j++)
+            data[j] = (C_DATATYPE)GEN_DATA(j);
+
+        /* Select hyperslab in the file (re-fetched each iteration so the
+         * dataspace reflects any H5Dset_extent from the previous pass)
+         */
+        filespace = H5Dget_space(dset_id);
+        VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+        /* Each process defines the dataset selection in memory and writes
+         * it to the hyperslab in the file
+         */
+        count[0] = (i + 1);
+        count[1] = (i + 1);
+        stride[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+        stride[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+        block[0] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS;
+        block[1] = (hsize_t)WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS;
+        /* Stride == block, so each rank selects a contiguous run of (i+1)
+         * whole chunk-rows starting at its own offset; ranks never overlap.
+         */
+        start[0] = ((hsize_t)mpi_rank * block[0] * count[0]);
+        start[1] = 0;
+
+        if (VERBOSE_MED) {
+            HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+                     ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+                     ", %" PRIuHSIZE " ]\n",
+                     mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+                     block[1]);
+            HDfflush(stdout);
+        }
+
+        VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+            "Hyperslab selection succeeded");
+
+        /* H5S_BLOCK: treat the memory buffer as one contiguous block with the
+         * same number of elements as the file selection
+         */
+        VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+            "Dataset write succeeded");
+
+        /* Verify space allocation status */
+        verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+        /* Close and reopen the dataset before reading back -- NOTE(review):
+         * presumably done so the read goes through the filter pipeline from
+         * the file rather than any cached chunks; confirm intent.
+         */
+        VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+        dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+        VRFY((dset_id >= 0), "Dataset open succeeded");
+
+        /* Pre-fill read buffer with 0xFF so stale contents can't mask a failed read */
+        HDmemset(read_buf, 255, data_size);
+
+        VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+            "Dataset read succeeded");
+
+        /* Verify the correct data was written */
+        VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+        if (i < (size_t)WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+            /*
+             * Extend the dataset by the size of one chunk per rank
+             * in the first extensible dimension. Extend the dataset
+             * by the size of chunk in the second extensible dimension.
+             */
+            dataset_dims[0] += (hsize_t)mpi_size * block[0];
+            dataset_dims[1] += block[1];
+            VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+            /* Verify space allocation status: the newly-added chunks are not
+             * yet written, so only some chunks have allocated space
+             */
+            verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+        }
+
+        VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+    }
+
+    if (data)
+        HDfree(data);
+    if (read_buf)
+        HDfree(read_buf);
+
+    VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+    VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+    VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+    return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a dataset has two unlimited dimensions and each MPI
+ * rank writes to a portion of each chunk in the dataset.
+ * On each iteration, the dataset is extended in its extensible
+ * dimensions by the size of a chunk and then all chunks are
+ * written to by all ranks, then read back and verified.
+ */
+static void
+test_write_filtered_dataset_multi_unlim_dim_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t max_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered chunks w/ two unlimited dimensions");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_NCOLS;
+ max_dims[0] = H5S_UNLIMITED;
+ max_dims[1] = H5S_UNLIMITED;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, dataset_dims, max_dims);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ for (i = 0; i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS; i++) {
+ C_DATATYPE *tmp_realloc = NULL;
+ size_t j;
+
+ /* Set selected dimensions */
+ sel_dims[0] = (i + 1);
+ sel_dims[1] = (i + 1) * (size_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(data, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ data = tmp_realloc;
+
+ tmp_realloc = (C_DATATYPE *)HDrealloc(read_buf, data_size);
+ VRFY((NULL != tmp_realloc), "HDrealloc succeeded");
+ read_buf = tmp_realloc;
+
+ for (j = 0; j < data_size / sizeof(*data); j++)
+ data[j] = (C_DATATYPE)GEN_DATA(j);
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (i + 1);
+ count[1] = (i + 1);
+ stride[0] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0],
+ block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* Verify correct data was written */
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (i < (size_t)WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS - 1) {
+ /* Extend the dataset by the size of a chunk in each extensible dimension */
+ dataset_dims[0] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS;
+ dataset_dims[1] += (hsize_t)WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS;
+ VRFY(H5Dset_extent(dset_id, dataset_dims) >= 0, "H5Dset_extent succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+ }
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ }
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -669,7 +1660,8 @@ test_write_filtered_dataset_overlap(void)
* 02/01/2017
*/
static void
-test_write_filtered_dataset_single_no_selection(void)
+test_write_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -683,27 +1675,18 @@ test_write_filtered_dataset_single_no_selection(void)
hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
size_t segment_length;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with a single process having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -723,20 +1706,22 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -774,15 +1759,17 @@ test_write_filtered_dataset_single_no_selection(void)
data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
- data = (C_DATATYPE *)HDcalloc(1, data_size);
- VRFY((NULL != data), "HDcalloc succeeded");
+ if (mpi_rank != WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+ }
correct_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != correct_buf), "HDcalloc succeeded");
- for (i = 0; i < data_size / sizeof(*data); i++)
- data[i] = (C_DATATYPE)GEN_DATA(i);
-
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
@@ -793,15 +1780,12 @@ test_write_filtered_dataset_single_no_selection(void)
((size_t)WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
0, segment_length * sizeof(*data));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status - data should only have been written if MPI size > 1 */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1 ? SOME_CHUNKS_WRITTEN : NO_CHUNKS_WRITTEN));
+
if (data)
HDfree(data);
@@ -811,10 +1795,10 @@ test_write_filtered_dataset_single_no_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -824,10 +1808,11 @@ test_write_filtered_dataset_single_no_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -847,7 +1832,8 @@ test_write_filtered_dataset_single_no_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_all_no_selection(void)
+test_write_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -856,27 +1842,18 @@ test_write_filtered_dataset_all_no_selection(void)
hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with all processes having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -892,20 +1869,22 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
filespace = H5Dget_space(dset_id);
@@ -926,15 +1905,12 @@ test_write_filtered_dataset_all_no_selection(void)
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE)GEN_DATA(i);
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status - no ranks should have written any data */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -944,10 +1920,10 @@ test_write_filtered_dataset_all_no_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -957,10 +1933,11 @@ test_write_filtered_dataset_all_no_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -974,7 +1951,8 @@ test_write_filtered_dataset_all_no_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_point_selection(void)
+test_write_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -985,27 +1963,18 @@ test_write_filtered_dataset_point_selection(void)
hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, j, data_size, correct_buf_size;
size_t num_points;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to filtered chunks with point selection");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
@@ -1022,20 +1991,22 @@ test_write_filtered_dataset_point_selection(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Set up point selection */
@@ -1075,15 +2046,12 @@ test_write_filtered_dataset_point_selection(void)
(dataset_dims[1] * (i / ((hsize_t)mpi_size * dataset_dims[1]))) + (i % dataset_dims[1]) +
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1093,10 +2061,10 @@ test_write_filtered_dataset_point_selection(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1108,10 +2076,11 @@ test_write_filtered_dataset_point_selection(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1129,7 +2098,8 @@ test_write_filtered_dataset_point_selection(void)
* 02/02/2017
*/
static void
-test_write_filtered_dataset_interleaved_write(void)
+test_write_filtered_dataset_interleaved_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1142,27 +2112,18 @@ test_write_filtered_dataset_interleaved_write(void)
hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing interleaved write to filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NROWS;
@@ -1179,20 +2140,22 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1251,15 +2214,12 @@ test_write_filtered_dataset_interleaved_write(void)
+ ((hsize_t)INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS *
(i / (hsize_t)(mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS))));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1269,10 +2229,10 @@ test_write_filtered_dataset_interleaved_write(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1282,10 +2242,11 @@ test_write_filtered_dataset_interleaved_write(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1308,7 +2269,8 @@ test_write_filtered_dataset_interleaved_write(void)
* 08/20/2021
*/
static void
-test_write_transformed_filtered_dataset_no_overlap(void)
+test_write_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1321,27 +2283,18 @@ test_write_transformed_filtered_dataset_no_overlap(void)
hsize_t count[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared transformed and filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
@@ -1358,19 +2311,22 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -1419,11 +2375,9 @@ test_write_transformed_filtered_dataset_no_overlap(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])) +
(i / (dataset_dims[0] / (hsize_t)mpi_size * dataset_dims[1])));
- /* Create property list for collective dataset write and data transform */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
@@ -1440,7 +2394,7 @@ test_write_transformed_filtered_dataset_no_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
@@ -1448,6 +2402,13 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (correct_buf)
HDfree(correct_buf);
if (read_buf)
@@ -1457,6 +2418,7 @@ test_write_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1471,7 +2433,8 @@ test_write_transformed_filtered_dataset_no_overlap(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_write_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1484,27 +2447,18 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
@@ -1524,20 +2478,22 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
+ HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1591,15 +2547,12 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
correct_buf[i] = (C_DATATYPE)((i % (hsize_t)mpi_size) + (i / (hsize_t)mpi_size));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1609,10 +2562,10 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1622,10 +2575,11 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1641,7 +2595,8 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_no_overlap_same_pages(void)
+test_write_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1654,27 +2609,18 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
@@ -1695,20 +2641,22 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1762,15 +2710,12 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
correct_buf[i] = (C_DATATYPE)((i % (dataset_dims[0] * dataset_dims[1])) +
(i / (dataset_dims[0] * dataset_dims[1])));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1780,10 +2725,10 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1793,10 +2738,11 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1812,7 +2758,8 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
* 02/06/2017
*/
static void
-test_write_3d_filtered_dataset_overlap(void)
+test_write_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -1825,27 +2772,18 @@ test_write_3d_filtered_dataset_overlap(void)
hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
@@ -1865,20 +2803,22 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -1943,15 +2883,12 @@ test_write_3d_filtered_dataset_overlap(void)
(i / (hsize_t)(mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH *
WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))));
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -1961,10 +2898,10 @@ test_write_3d_filtered_dataset_overlap(void)
read_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -1974,10 +2911,11 @@ test_write_3d_filtered_dataset_overlap(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -1992,7 +2930,8 @@ test_write_3d_filtered_dataset_overlap(void)
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2005,28 +2944,27 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
@@ -2045,15 +2983,15 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2066,11 +3004,13 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype,
- filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2124,13 +3064,10 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
correct_buf[i].field3 = (long)((i % dataset_dims[1]) + (i / dataset_dims[1]));
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
if (data)
HDfree(data);
@@ -2141,11 +3078,11 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2154,11 +3091,12 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2173,7 +3111,8 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_no_conversion_shared(void)
+test_write_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2186,28 +3125,27 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id, dset_id, plist_id, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
@@ -2226,15 +3164,15 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2247,11 +3185,13 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
"Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2311,13 +3251,10 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
(((i % ((hsize_t)mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1]));
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) >= 0), "Dataset write succeeded");
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0), "Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
if (data)
HDfree(data);
@@ -2329,10 +3266,10 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((NULL != read_buf), "HDcalloc succeeded");
dset_id =
- H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2341,11 +3278,12 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2356,16 +3294,18 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
* chunks using a compound datatype which requires a
* datatype conversion.
*
- * NOTE: This test currently should fail because the
- * datatype conversion causes the parallel library to
- * break to independent I/O and this isn't allowed when
- * there are filters in the pipeline.
+ * NOTE: This test currently should fail for mpi_size > 1
+ * because the datatype conversion causes the parallel
+ * library to break to independent I/O and this isn't
+ * allowed when there are filters in the pipeline,
+ * unless there is only one MPI rank.
*
* Programmer: Jordan Henderson
* 02/07/2017
*/
static void
-test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
+test_write_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2378,28 +3318,33 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Skip for MPI communicator size of 1 */
+ if (mpi_size == 1) {
+ SKIPPED();
+ return;
+ }
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
@@ -2418,15 +3363,15 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2447,11 +3392,13 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2497,20 +3444,16 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
data[i].field3 = (long)GEN_DATA(i);
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY
{
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded");
}
H5E_END_TRY;
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -2520,11 +3463,11 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2533,12 +3476,13 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2549,16 +3493,18 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
* chunks using a compound datatype which requires
* a datatype conversion.
*
- * NOTE: This test currently should fail because the
- * datatype conversion causes the parallel library to
- * break to independent I/O and this isn't allowed when
- * there are filters in the pipeline.
+ * NOTE: This test currently should fail for mpi_size > 1
+ * because the datatype conversion causes the parallel
+ * library to break to independent I/O and this isn't
+ * allowed when there are filters in the pipeline,
+ * unless there is only one MPI rank.
*
* Programmer: Jordan Henderson
* 02/10/2017
*/
static void
-test_write_cmpd_filtered_dataset_type_conversion_shared(void)
+test_write_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *data = NULL;
COMPOUND_C_DATATYPE *read_buf = NULL;
@@ -2571,28 +3517,33 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
size_t i, correct_buf_size;
- hid_t file_id, dset_id, plist_id, filetype, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs(
"Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Skip for MPI communicator size of 1 */
+ if (mpi_size == 1) {
+ SKIPPED();
+ return;
+ }
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
@@ -2611,15 +3562,15 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
/* Create the compound type for memory. */
memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
@@ -2640,11 +3591,13 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0), "Datatype insertion succeeded");
- dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -2690,20 +3643,16 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
data[i].field3 = (long)GEN_DATA(i);
}
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
/* Ensure that this test currently fails since type conversions break collective mode */
H5E_BEGIN_TRY
{
- VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
- "Dataset write succeeded");
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, dxpl_id, data) < 0), "Dataset write succeeded");
}
H5E_END_TRY;
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, NO_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
@@ -2713,11 +3662,11 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -2726,12 +3675,13 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
if (read_buf)
HDfree(read_buf);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2751,7 +3701,8 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
* 05/14/2018
*/
static void
-test_read_one_chunk_filtered_dataset(void)
+test_read_one_chunk_filtered_dataset(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -2765,16 +3716,15 @@ test_read_one_chunk_filtered_dataset(void)
hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from one-chunk filtered dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
@@ -2802,6 +3752,9 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -2810,44 +3763,43 @@ test_read_one_chunk_filtered_dataset(void)
chunk_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t)mpi_size;
@@ -2887,18 +3839,12 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -2937,7 +3883,7 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -2956,7 +3902,8 @@ test_read_one_chunk_filtered_dataset(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_no_overlap(void)
+test_read_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -2970,16 +3917,15 @@ test_read_filtered_dataset_no_overlap(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
@@ -3006,6 +3952,9 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3014,44 +3963,43 @@ test_read_filtered_dataset_no_overlap(void)
chunk_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
@@ -3091,18 +4039,12 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3141,7 +4083,7 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3161,7 +4103,8 @@ test_read_filtered_dataset_no_overlap(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_overlap(void)
+test_read_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3175,16 +4118,15 @@ test_read_filtered_dataset_overlap(void)
hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_NCOLS;
@@ -3211,6 +4153,9 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3219,44 +4164,43 @@ test_read_filtered_dataset_overlap(void)
chunk_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
- H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
@@ -3296,18 +4240,12 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3362,7 +4300,7 @@ test_read_filtered_dataset_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3382,7 +4320,8 @@ test_read_filtered_dataset_overlap(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_single_no_selection(void)
+test_read_filtered_dataset_single_no_selection(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3397,16 +4336,15 @@ test_read_filtered_dataset_single_no_selection(void)
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
size_t segment_length;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with a single process having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3437,6 +4375,9 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
@@ -3446,44 +4387,43 @@ test_read_filtered_dataset_single_no_selection(void)
chunk_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
@@ -3530,19 +4470,19 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
- "Dataset read succeeded");
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC) {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, NULL) >= 0),
+ "Dataset read succeeded");
+ }
+ else {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+ }
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -3588,7 +4528,7 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3609,7 +4549,8 @@ test_read_filtered_dataset_single_no_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_all_no_selection(void)
+test_read_filtered_dataset_all_no_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3617,14 +4558,13 @@ test_read_filtered_dataset_all_no_selection(void)
hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
size_t read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with all processes having no selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3646,6 +4586,9 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3654,44 +4597,43 @@ test_read_filtered_dataset_all_no_selection(void)
chunk_dims[0] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = sel_dims[1] = 0;
@@ -3705,20 +4647,16 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
+
if (read_buf)
HDfree(read_buf);
if (correct_buf)
@@ -3727,7 +4665,7 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3747,7 +4685,8 @@ test_read_filtered_dataset_all_no_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_point_selection(void)
+test_read_filtered_dataset_point_selection(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *correct_buf = NULL;
C_DATATYPE *read_buf = NULL;
@@ -3759,16 +4698,15 @@ test_read_filtered_dataset_point_selection(void)
hsize_t flat_dims[1];
size_t i, j, read_buf_size, correct_buf_size;
size_t num_points;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from filtered chunks with point selection");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3795,6 +4733,9 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -3803,44 +4744,43 @@ test_read_filtered_dataset_point_selection(void)
chunk_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t)mpi_size;
@@ -3871,18 +4811,12 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t)num_points, (const hsize_t *)coords) >= 0),
"Point selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -3941,7 +4875,7 @@ test_read_filtered_dataset_point_selection(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -3964,7 +4898,8 @@ test_read_filtered_dataset_point_selection(void)
* 05/15/2018
*/
static void
-test_read_filtered_dataset_interleaved_read(void)
+test_read_filtered_dataset_interleaved_read(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -3978,16 +4913,15 @@ test_read_filtered_dataset_interleaved_read(void)
hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing interleaved read from filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
@@ -4023,6 +4957,9 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -4031,44 +4968,43 @@ test_read_filtered_dataset_interleaved_read(void)
chunk_dims[0] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
chunk_dims[1] = (hsize_t)INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)(INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
@@ -4110,18 +5046,12 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4176,7 +5106,7 @@ test_read_filtered_dataset_interleaved_read(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4196,7 +5126,8 @@ test_read_filtered_dataset_interleaved_read(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
+test_read_3d_filtered_dataset_no_overlap_separate_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
@@ -4212,14 +5143,13 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
@@ -4245,6 +5175,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
@@ -4255,45 +5188,44 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY(
(H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
@@ -4340,18 +5272,12 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4392,7 +5318,7 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4419,7 +5345,8 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
* 08/20/2021
*/
static void
-test_read_transformed_filtered_dataset_no_overlap(void)
+test_read_transformed_filtered_dataset_no_overlap(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -4433,16 +5360,15 @@ test_read_transformed_filtered_dataset_no_overlap(void)
hsize_t block[READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared transformed and filtered chunks");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_NCOLS;
@@ -4469,6 +5395,9 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
@@ -4478,20 +5407,23 @@ test_read_transformed_filtered_dataset_no_overlap(void)
chunk_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY(
(H5Pset_chunk(plist_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
@@ -4505,26 +5437,26 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, correct_buf) >= 0),
"Dataset write succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+
+ /* Verify space allocation status */
+ plist_id = H5Dget_create_plist(dset_id);
+ VRFY((plist_id >= 0), "H5Dget_create_plist succeeded");
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_TRANSFORMED_FILTERED_CHUNKS_CH_NROWS;
@@ -4565,11 +5497,9 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read and data transform */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
+ /* Create property list for data transform */
+ plist_id = H5Pcopy(dxpl_id);
+ VRFY((plist_id >= 0), "DXPL copy succeeded");
/* Set data transform expression */
VRFY((H5Pset_data_transform(plist_id, "x") >= 0), "Set data transform expression succeeded");
@@ -4619,6 +5549,7 @@ test_read_transformed_filtered_dataset_no_overlap(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4639,7 +5570,8 @@ test_read_transformed_filtered_dataset_no_overlap(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_no_overlap_same_pages(void)
+test_read_3d_filtered_dataset_no_overlap_same_pages(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *read_buf = NULL;
C_DATATYPE *correct_buf = NULL;
@@ -4653,16 +5585,15 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
dataset_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
dataset_dims[2] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
@@ -4689,6 +5620,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace =
H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
@@ -4699,45 +5633,44 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
chunk_dims[1] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >=
0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME,
HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
@@ -4783,18 +5716,12 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -4833,7 +5760,7 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -4854,7 +5781,8 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
* 05/16/2018
*/
static void
-test_read_3d_filtered_dataset_overlap(void)
+test_read_3d_filtered_dataset_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
MPI_Datatype vector_type;
MPI_Datatype resized_vector_type;
@@ -4870,14 +5798,13 @@ test_read_3d_filtered_dataset_overlap(void)
hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks in 3D dataset");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
dataset_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
dataset_dims[2] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
@@ -4916,6 +5843,9 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
VRFY((filespace >= 0), "File dataspace creation succeeded");
@@ -4925,44 +5855,43 @@ test_read_3d_filtered_dataset_overlap(void)
chunk_dims[1] = (hsize_t)READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
chunk_dims[2] = 1;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
+ dset_id = H5Dcreate2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME,
filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)(READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
@@ -5007,18 +5936,12 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
global_buf = (C_DATATYPE *)HDcalloc(1, correct_buf_size);
@@ -5068,7 +5991,7 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5088,7 +6011,8 @@ test_read_3d_filtered_dataset_overlap(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
+test_read_cmpd_filtered_dataset_no_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5102,16 +6026,23 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
- int * recvcounts = NULL;
- int * displs = NULL;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
@@ -5153,6 +6084,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5162,46 +6096,45 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME,
memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
@@ -5241,18 +6174,12 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5291,7 +6218,7 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5311,7 +6238,8 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_no_conversion_shared(void)
+test_read_cmpd_filtered_dataset_no_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5325,16 +6253,23 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id, memtype;
- hid_t filespace, memspace;
- int * recvcounts = NULL;
- int * displs = NULL;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID,
+ memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
if (MAINPROCESS)
HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
@@ -5382,6 +6317,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5391,46 +6329,45 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME,
memtype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
@@ -5470,18 +6407,12 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5520,7 +6451,7 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5540,7 +6471,8 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
+test_read_cmpd_filtered_dataset_type_conversion_unshared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5554,8 +6486,10 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
@@ -5563,7 +6497,12 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype "
"conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
@@ -5613,6 +6552,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5622,46 +6564,45 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME,
- H5P_DEFAULT);
+ dset_id =
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
@@ -5701,18 +6642,12 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5752,7 +6687,7 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -5772,7 +6707,8 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
* 05/17/2018
*/
static void
-test_read_cmpd_filtered_dataset_type_conversion_shared(void)
+test_read_cmpd_filtered_dataset_type_conversion_shared(const char *parent_group, H5Z_filter_t filter_id,
+ hid_t fapl_id, hid_t dcpl_id, hid_t dxpl_id)
{
COMPOUND_C_DATATYPE *read_buf = NULL;
COMPOUND_C_DATATYPE *correct_buf = NULL;
@@ -5786,8 +6722,10 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
hsize_t flat_dims[1];
size_t i, read_buf_size, correct_buf_size;
- hid_t file_id, dset_id, plist_id, filetype, memtype;
- hid_t filespace, memspace;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t filetype = H5I_INVALID_HID, memtype = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
int * recvcounts = NULL;
int * displs = NULL;
@@ -5795,7 +6733,12 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
HDputs(
"Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
- CHECK_CUR_FILTER_AVAIL();
+ /* SZIP and ScaleOffset filters don't support compound types */
+ if (filter_id == H5Z_FILTER_SZIP || filter_id == H5Z_FILTER_SCALEOFFSET) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
dataset_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
@@ -5851,6 +6794,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
dataset_dims, NULL);
@@ -5860,46 +6806,45 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
chunk_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
chunk_dims[1] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS,
chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
+ dset_id = H5Dcreate2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME,
filetype, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
dset_id =
- H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ H5Dopen2(group_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
sel_dims[0] = (hsize_t)READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t)mpi_size;
@@ -5939,18 +6884,12 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset read */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
read_buf_size = flat_dims[0] * sizeof(*read_buf);
read_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, read_buf_size);
VRFY((NULL != read_buf), "HDcalloc succeeded");
- VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0), "Dataset read succeeded");
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, dxpl_id, read_buf) >= 0), "Dataset read succeeded");
global_buf = (COMPOUND_C_DATATYPE *)HDcalloc(1, correct_buf_size);
VRFY((NULL != global_buf), "HDcalloc succeeded");
@@ -5989,7 +6928,7 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -6006,7 +6945,8 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
* 08/03/2017
*/
static void
-test_write_serial_read_parallel(void)
+test_write_serial_read_parallel(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -6014,14 +6954,13 @@ test_write_serial_read_parallel(void)
hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write file serially; read file in parallel");
- CHECK_CUR_FILTER_AVAIL();
-
dataset_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NROWS;
dataset_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_NCOLS;
dataset_dims[2] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_DEPTH;
@@ -6040,6 +6979,9 @@ test_write_serial_read_parallel(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
/* Create the dataspace for the dataset */
chunk_dims[0] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
chunk_dims[1] = (hsize_t)WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
@@ -6049,20 +6991,22 @@ test_write_serial_read_parallel(void)
VRFY((filespace >= 0), "File dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
@@ -6076,10 +7020,15 @@ test_write_serial_read_parallel(void)
VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
}
@@ -6095,28 +7044,16 @@ test_write_serial_read_parallel(void)
correct_buf[i] = (long)i;
/* All ranks open the file and verify their "portion" of the dataset is correct */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
+ dset_id = H5Dopen2(group_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
"Dataset read succeeded");
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
@@ -6127,13 +7064,13 @@ test_write_serial_read_parallel(void)
HDfree(read_buf);
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
}
-#if MPI_VERSION >= 3
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
/*
* Tests parallel write of filtered data
* to a dataset. After the write has
@@ -6145,7 +7082,8 @@ test_write_serial_read_parallel(void)
* 08/03/2017
*/
static void
-test_write_parallel_read_serial(void)
+test_write_parallel_read_serial(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id,
+ hid_t dcpl_id, hid_t dxpl_id)
{
C_DATATYPE *data = NULL;
C_DATATYPE *read_buf = NULL;
@@ -6158,27 +7096,18 @@ test_write_parallel_read_serial(void)
hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
size_t i, data_size, correct_buf_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing write file in parallel; read serially");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)WRITE_PARALLEL_READ_SERIAL_NROWS;
@@ -6198,20 +7127,22 @@ test_write_parallel_read_serial(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0),
"Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ dset_id = H5Dcreate2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/* Each process defines the dataset selection in memory and writes
@@ -6256,22 +7187,20 @@ test_write_parallel_read_serial(void)
for (i = 0; i < data_size / sizeof(*data); i++)
data[i] = (C_DATATYPE)GEN_DATA(i);
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
- VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
if (data)
HDfree(data);
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
if (MAINPROCESS) {
@@ -6286,7 +7215,10 @@ test_write_parallel_read_serial(void)
VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
- dset_id = H5Dopen2(file_id, "/" WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ dset_id = H5Dopen2(group_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset open succeeded");
correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
@@ -6307,6 +7239,7 @@ test_write_parallel_read_serial(void)
VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)), "Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
HDfree(correct_buf);
@@ -6326,9 +7259,11 @@ test_write_parallel_read_serial(void)
* 06/04/2018
*/
static void
-test_shrinking_growing_chunks(void)
+test_shrinking_growing_chunks(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
{
- double *data = NULL;
+ double *data = NULL;
+ double *read_buf = NULL;
hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
@@ -6337,27 +7272,18 @@ test_shrinking_growing_chunks(void)
hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
size_t i, data_size;
- hid_t file_id = -1, dset_id = -1, plist_id = -1;
- hid_t filespace = -1, memspace = -1;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID, memspace = H5I_INVALID_HID;
if (MAINPROCESS)
HDputs("Testing continually shrinking/growing chunks");
- CHECK_CUR_FILTER_AVAIL();
-
- /* Set up file access property list with parallel I/O access */
- plist_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((plist_id >= 0), "FAPL creation succeeded");
-
- VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
-
- file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
VRFY((file_id >= 0), "Test file open succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
/* Create the dataspace for the dataset */
dataset_dims[0] = (hsize_t)SHRINKING_GROWING_CHUNKS_NROWS;
@@ -6374,19 +7300,21 @@ test_shrinking_growing_chunks(void)
VRFY((memspace >= 0), "Memory dataspace creation succeeded");
/* Create chunked dataset */
- plist_id = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((plist_id >= 0), "DCPL creation succeeded");
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
/* Add test filter to the pipeline */
- VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
- dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+ dset_id = H5Dcreate2(group_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
H5P_DEFAULT, plist_id, H5P_DEFAULT);
VRFY((dset_id >= 0), "Dataset creation succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
/*
@@ -6417,39 +7345,1302 @@ test_shrinking_growing_chunks(void)
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
"Hyperslab selection succeeded");
- /* Create property list for collective dataset write */
- plist_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((plist_id >= 0), "DXPL creation succeeded");
-
- VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0), "Set DXPL MPIO succeeded");
-
data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
data = (double *)HDcalloc(1, data_size);
VRFY((NULL != data), "HDcalloc succeeded");
+ read_buf = (double *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
/* Continually write random float data, followed by zeroed-out data */
- if ((i % 2))
+ if (i % 2)
HDmemset(data, 0, data_size);
else {
size_t j;
for (j = 0; j < data_size / sizeof(*data); j++) {
- data[j] = (float)(rand() / (double)(RAND_MAX / (double)1.0L));
+ data[j] = (rand() / (double)(RAND_MAX / (double)1.0L));
}
}
- VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, data) >= 0),
"Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ if (i % 2) {
+ HDmemset(read_buf, 255, data_size);
+ }
+ else {
+ HDmemset(read_buf, 0, data_size);
+ }
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "data verification succeeded");
}
+ if (read_buf)
+ HDfree(read_buf);
if (data)
HDfree(data);
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
- VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when only one MPI
+ * rank writes to a particular partial edge chunk in the dataset.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab in the shape of a single chunk
+ * that is offset to cover the whole edge chunk and part of the
+ * full chunk next to the edge chunk.
+ */
+static void
+test_edge_chunks_no_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared filtered edge chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ start[1] =
+ (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Repeat the previous, but set option to not filter partial edge chunks */
+ if (MAINPROCESS)
+ HDputs("Testing write to unshared unfiltered edge chunks");
+
+ H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
+
+ dset_id = H5Dcreate2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t)mpi_rank * (hsize_t)WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ start[1] =
+ (hsize_t)(WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, (mpi_size > 1) ? SOME_CHUNKS_WRITTEN : ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when every MPI
+ * rank writes to every partial edge chunk in the dataset.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab in the shape of one row of each
+ * chunk that is offset in the second dimension to cover the whole
+ * edge chunk and part of the full chunk next to the edge chunk.
+ */
+static void
+test_edge_chunks_overlap(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing write to shared filtered edge chunks");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)1;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Repeat the previous, but set option to not filter partial edge chunks */
+ if (MAINPROCESS)
+ HDputs("Testing write to shared unfiltered edge chunks");
+
+ H5Pset_chunk_opts(plist_id, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
+
+ dset_id = H5Dcreate2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, HDF5_DATATYPE_NAME,
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS / WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS);
+ count[1] = 1;
+ stride[0] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t)1;
+ block[1] = (hsize_t)WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank;
+ start[1] =
+ (hsize_t)(WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS - WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ dset_id = H5Dopen2(group_id, WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ HDmemset(read_buf, 255, data_size);
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, data, data_size)), "Data verification succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that filtered and unfiltered partial edge chunks can be
+ * written to and read from correctly in parallel when only one
+ * MPI rank writes to a particular edge chunk in the dataset and
+ * only performs a partial write to the edge chunk.
+ *
+ * The dataset contains partial edge chunks in the second dimension.
+ * Each MPI rank selects a hyperslab in the shape of part of a single
+ * edge chunk and writes to just a portion of the edge chunk.
+ */
+static void
+test_edge_chunks_partial_write(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ /* TODO: implement partial writes to shared partial edge chunks.
+ *
+ * Until the test body exists, explicitly discard the parameters so
+ * warning-clean builds (-Wunused-parameter) don't flag this stub.
+ */
+ (void)parent_group;
+ (void)filter_id;
+ (void)fapl_id;
+ (void)dcpl_id;
+ (void)dxpl_id;
+}
+
+/*
+ * Tests that the parallel compression feature correctly handles
+ * writing fill values to a dataset and reading fill values from
+ * unallocated parts of a dataset.
+ */
+static void
+test_fill_values(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ /* Test outline (all HDF5 calls below are made collectively by every rank):
+ * 1) Create a filtered, chunked dataset with a defined fill value and
+ * verify that reading the not-yet-allocated dataset returns the
+ * fill value everywhere.
+ * 2) Have each rank partially write one chunk, then read the whole
+ * dataset back and verify written elements vs. fill-value elements.
+ * 3) Write the entire dataset and verify no fill values are returned.
+ * 4) Repeat 1-3 on a second dataset with fill time H5D_FILL_TIME_ALLOC.
+ */
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE fill_value;
+ hsize_t dataset_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t start[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t count[FILL_VALUES_TEST_DATASET_DIMS];
+ hsize_t block[FILL_VALUES_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
+
+ if (MAINPROCESS)
+ HDputs("Testing fill values");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_VALUES_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_VALUES_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_VALUES_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_VALUES_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set a fill value */
+ fill_value = FILL_VALUES_TEST_FILL_VAL;
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
+
+ dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
+ plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ correct_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Read entire dataset and verify that the fill value is returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value is returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ /* Each rank writes one block of (CH_NCOLS - 1) elements in row 'mpi_rank',
+ * deliberately leaving the last column of the chunk unwritten. */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ /* H5S_BLOCK: the memory buffer is treated as one contiguous block
+ * whose element count matches the file-space selection. */
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+ * Each MPI rank communicates their written piece of data
+ * into each other rank's correctness-checking buffer
+ */
+ recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ /* Rank i wrote (count[1] * block[1]) elements at the start of dataset
+ * row i, so gather its contribution at offset i * row-width; the rest
+ * of correct_buf keeps the fill value from the earlier loop. */
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ /* NOTE(review): assumes FILL_VALUES_TEST_CH_NROWS is evenly divisible
+ * by mpi_size, otherwise the trailing rows of each chunk are never
+ * written -- confirm for all rank counts this test runs with. */
+ block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /* After a full write, no element should still hold the fill value */
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /********************************************************************
+ * Set the fill time to H5D_FILL_TIME_ALLOC and repeat the previous *
+ ********************************************************************/
+
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_ALLOC) >= 0), "H5Pset_fill_time succeeded");
+
+ /* 'filespace' here is still the full-extent dataspace retrieved from
+ * the first dataset; its extent matches dataset_dims, so it is reused
+ * to create the second dataset. */
+ dset_id = H5Dcreate2(group_id, FILL_VALUES_TEST_DATASET_NAME2, HDF5_DATATYPE_NAME, filespace, H5P_DEFAULT,
+ plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Read entire dataset and verify that the fill value is returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ correct_buf[i] = FILL_VALUES_TEST_FILL_VAL;
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value is returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUES_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Refill the data buffer so the second dataset gets fresh values */
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ /*
+ * Each MPI rank communicates their written piece of data
+ * into each other rank's correctness-checking buffer
+ */
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, correct_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUES_TEST_NROWS / (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUES_TEST_NCOLS / (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_VALUES_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUES_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_VALUES_TEST_DATASET_NAME2, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_VALUES_TEST_FILL_VAL), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ if (correct_buf)
+ HDfree(correct_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that the parallel compression feature can handle
+ * an undefined fill value. Nothing is verified in this
+ * test since the fill value isn't defined.
+ */
+static void
+test_fill_value_undefined(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ /* Creates a filtered, chunked dataset with an undefined (NULL) fill
+ * value. A pre-allocation read must fail unless early space allocation
+ * is in use; later reads are performed but their contents cannot be
+ * verified, since unwritten bytes are unspecified without a fill value.
+ */
+ H5D_alloc_time_t alloc_time;
+ C_DATATYPE * data = NULL;
+ C_DATATYPE * read_buf = NULL;
+ hsize_t dataset_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t start[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t count[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ hsize_t block[FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+
+ if (MAINPROCESS)
+ HDputs("Testing undefined fill value");
+
+ /* The dataset's allocation time decides whether the first read below
+ * is expected to succeed or fail. */
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set an undefined fill value (NULL value buffer) */
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, NULL) >= 0), "Fill Value set");
+
+ dset_id = H5Dcreate2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ /*
+ * Read entire dataset - nothing to verify since there's no fill value.
+ * If not using early space allocation, the read should fail since storage
+ * isn't allocated yet and no fill value is defined.
+ */
+ if (alloc_time == H5D_ALLOC_TIME_EARLY) {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+ }
+ else {
+ /* Despite the VRFY label text, this branch asserts the read FAILS
+ * (return < 0); H5E_BEGIN_TRY suppresses the expected error stack. */
+ H5E_BEGIN_TRY
+ {
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) < 0),
+ "Dataset read succeeded");
+ }
+ H5E_END_TRY;
+ }
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset. Don't verify
+ * anything since there's no fill value defined.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_VALUE_UNDEFINED_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ /* H5S_BLOCK: the memory buffer is treated as one contiguous block
+ * whose element count matches the file-space selection. */
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+ * Write to the whole dataset, then read it back; the contents are not
+ * compared against anything since no fill value is defined.
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NROWS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_NCOLS / (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ /* NOTE(review): assumes FILL_VALUE_UNDEFINED_TEST_CH_NROWS is evenly
+ * divisible by mpi_size -- confirm for all rank counts this test runs
+ * with. */
+ block[0] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_VALUE_UNDEFINED_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Read the dataset back; only checks that the read call itself
+ * succeeds -- the data is not verified (no fill value defined) */
+ dset_id = H5Dopen2(group_id, FILL_VALUE_UNDEFINED_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests that the parallel compression feature correctly handles
+ * avoiding writing fill values to a dataset when the fill time
+ * is set as H5D_FILL_TIME_NEVER.
+ */
+static void
+test_fill_time_never(const char *parent_group, H5Z_filter_t filter_id, hid_t fapl_id, hid_t dcpl_id,
+ hid_t dxpl_id)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *fill_buf = NULL;
+ C_DATATYPE fill_value;
+ hsize_t dataset_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t chunk_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t sel_dims[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t start[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t stride[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t count[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ hsize_t block[FILL_TIME_NEVER_TEST_DATASET_DIMS];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID, dset_id = H5I_INVALID_HID, plist_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t filespace = H5I_INVALID_HID;
+ int * recvcounts = NULL;
+ int * displs = NULL;
+
+ if (MAINPROCESS)
+ HDputs("Testing fill time H5D_FILL_TIME_NEVER");
+
+ /*
+ * Only run this test when incremental file space allocation is
+ * used, as HDF5's chunk allocation code always writes fill values
+ * when filters are in the pipeline, but parallel compression does
+ * incremental file space allocation differently.
+ */
+ {
+ H5D_alloc_time_t alloc_time;
+
+ VRFY((H5Pget_alloc_time(dcpl_id, &alloc_time) >= 0), "H5Pget_alloc_time succeeded");
+
+ if (alloc_time != H5D_ALLOC_TIME_INCR) {
+ if (MAINPROCESS)
+ SKIPPED();
+ return;
+ }
+ }
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ group_id = H5Gopen2(file_id, parent_group, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gopen2 succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS;
+ dataset_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS;
+ chunk_dims[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ chunk_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ sel_dims[0] = (hsize_t)DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS * (hsize_t)DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(FILL_TIME_NEVER_TEST_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcopy(dcpl_id);
+ VRFY((plist_id >= 0), "DCPL copy succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, FILL_TIME_NEVER_TEST_DATASET_DIMS, chunk_dims) >= 0), "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id, filter_id, NULL) >= 0), "Filter set");
+
+ /* Set a fill value */
+ fill_value = FILL_VALUES_TEST_FILL_VAL;
+ VRFY((H5Pset_fill_value(plist_id, HDF5_DATATYPE_NAME, &fill_value) >= 0), "Fill Value set");
+
+ /* Set fill time of 'never' */
+ VRFY((H5Pset_fill_time(plist_id, H5D_FILL_TIME_NEVER) >= 0), "H5Pset_fill_time succeeded");
+
+ dset_id = H5Dcreate2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, DATASET_JUST_CREATED);
+
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Allocate buffer for reading entire dataset */
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ fill_buf = HDcalloc(1, read_buf_size);
+ VRFY((NULL != fill_buf), "HDcalloc succeeded");
+
+ /* Read entire dataset and verify that the fill value isn't returned */
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ fill_buf[i] = FILL_TIME_NEVER_TEST_FILL_VAL;
+
+ /*
+ * It should be very unlikely for the dataset's random
+ * values to all be the fill value, so this should be
+ * a safe comparison in theory.
+ */
+ VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to part of the first chunk in the dataset with
+ * all ranks, then read the whole dataset and ensure that
+ * the fill value isn't returned for the unwritten part of
+ * the chunk, as well as for the rest of the dataset that
+ * hasn't been written to yet.
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t)(FILL_TIME_NEVER_TEST_CH_NCOLS - 1);
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+
+ data = (C_DATATYPE *)HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE)GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, SOME_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ /*
+ * Each MPI rank communicates their written piece of data
+ * into each other rank's correctness-checking buffer
+ */
+ recvcounts = HDcalloc(1, (size_t)mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = HDcalloc(1, (size_t)mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ recvcounts[i] = (int)(count[1] * block[1]);
+ displs[i] = (int)(i * dataset_dims[1]);
+ }
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(data, recvcounts[mpi_rank], C_DATATYPE_MPI, fill_buf, recvcounts,
+ displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ /*
+ * It should be very unlikely for the dataset's random
+ * values to all be the fill value, so this should be
+ * a safe comparison in theory.
+ */
+ VRFY((0 != HDmemcmp(read_buf, fill_buf, read_buf_size)), "Data verification succeeded");
+
+ /*
+ * Write to whole dataset and ensure fill value isn't returned
+ * after reading whole dataset back
+ */
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t)FILL_TIME_NEVER_TEST_NROWS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ count[1] = (hsize_t)FILL_TIME_NEVER_TEST_NCOLS / (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ stride[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS;
+ stride[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ block[0] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NROWS / (hsize_t)mpi_size;
+ block[1] = (hsize_t)FILL_TIME_NEVER_TEST_CH_NCOLS;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %" PRIuHSIZE ", %" PRIuHSIZE " ], stride[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ], start[ %" PRIuHSIZE ", %" PRIuHSIZE " ], block size[ %" PRIuHSIZE
+ ", %" PRIuHSIZE " ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_BLOCK, filespace, dxpl_id, data) >= 0),
+ "Dataset write succeeded");
+
+ /* Verify space allocation status */
+ verify_space_alloc_status(dset_id, plist_id, ALL_CHUNKS_WRITTEN);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ dset_id = H5Dopen2(group_id, FILL_TIME_NEVER_TEST_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ for (i = 0; i < read_buf_size / sizeof(*read_buf); i++)
+ VRFY((read_buf[i] != FILL_TIME_NEVER_TEST_FILL_VAL), "Data verification succeeded");
+
+ if (displs)
+ HDfree(displs);
+ if (recvcounts)
+ HDfree(recvcounts);
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ if (fill_buf)
+ HDfree(fill_buf);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Gclose(group_id) >= 0), "Group close succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
return;
@@ -6459,8 +8650,14 @@ test_shrinking_growing_chunks(void)
int
main(int argc, char **argv)
{
- size_t i;
- hid_t file_id = -1, fapl = -1;
+ size_t cur_filter_idx = 0;
+ size_t num_filters = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fcpl_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
int mpi_code;
/* Initialize MPI */
@@ -6487,7 +8684,7 @@ main(int argc, char **argv)
if (MAINPROCESS) {
HDprintf("==========================\n");
- HDprintf("Parallel Filters tests\n");
+ HDprintf(" Parallel Filters tests\n");
HDprintf("==========================\n\n");
}
@@ -6496,72 +8693,161 @@ main(int argc, char **argv)
TestAlarmOn();
- /* Create test file */
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "FAPL creation succeeded");
+ num_filters = ARRAY_SIZE(filterIDs);
- VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ /* Set up file access property list with parallel I/O access,
+ * collective metadata reads/writes and the latest library
+ * version bounds */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "FAPL creation succeeded");
- VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ VRFY((H5Pset_fapl_mpio(fapl_id, comm, info) >= 0), "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, TRUE) >= 0), "H5Pset_all_coll_metadata_ops succeeded");
+ VRFY((H5Pset_coll_metadata_write(fapl_id, TRUE) >= 0), "H5Pset_coll_metadata_write succeeded");
- VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL),
- "Test file name created");
-
- file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
- VRFY((file_id >= 0), "Test file creation succeeded");
-
- VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
- (*tests[i])();
- }
- else {
- if (MAINPROCESS)
- MESG("MPI_Barrier failed");
- nerrors++;
- }
- }
+ VRFY((H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
/*
- * Increment the filter index to switch to the checksum filter
- * and re-run the tests.
+ * Set up Paged and Persistent Free Space Management
*/
- cur_filter_idx++;
-
- h5_clean_files(FILENAME, fapl);
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl_id >= 0), "FCPL creation succeeded");
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl >= 0), "FAPL creation succeeded");
+ VRFY((H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, TRUE, 1) >= 0),
+ "H5Pset_file_space_strategy succeeded");
- VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
-
- VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
- "Set libver bounds succeeded");
+ VRFY((h5_fixname(FILENAME[0], fapl_id, filenames[0], sizeof(filenames[0])) != NULL),
+ "Test file name created");
- file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, fcpl_id, fapl_id);
VRFY((file_id >= 0), "Test file creation succeeded");
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ file_id = H5I_INVALID_HID;
- if (MAINPROCESS) {
- HDprintf("\n=================================================================\n");
- HDprintf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
- HDprintf("=================================================================\n\n");
- }
-
- for (i = 0; i < ARRAY_SIZE(tests); i++) {
- if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
- (*tests[i])();
- }
- else {
- if (MAINPROCESS)
- MESG("MPI_Barrier failed");
- nerrors++;
+ /* Create property list for collective dataset write */
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Create DCPL for dataset creation */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "DCPL creation succeeded");
+
+ /* Run tests with all available filters */
+ for (cur_filter_idx = 0; cur_filter_idx < num_filters; cur_filter_idx++) {
+ H5FD_mpio_chunk_opt_t chunk_opt;
+ H5Z_filter_t cur_filter = filterIDs[cur_filter_idx];
+
+ /* Run tests with both linked-chunk and multi-chunk I/O */
+ for (chunk_opt = H5FD_MPIO_CHUNK_ONE_IO; chunk_opt <= H5FD_MPIO_CHUNK_MULTI_IO; chunk_opt++) {
+ H5D_alloc_time_t space_alloc_time;
+
+ /* Run tests with all available space allocation times */
+ for (space_alloc_time = H5D_ALLOC_TIME_EARLY; space_alloc_time <= H5D_ALLOC_TIME_INCR;
+ space_alloc_time++) {
+ const char *alloc_time;
+ unsigned filter_config;
+ htri_t filter_avail;
+ size_t i;
+ char group_name[512];
+
+ switch (space_alloc_time) {
+ case H5D_ALLOC_TIME_EARLY:
+ alloc_time = "Early";
+ break;
+ case H5D_ALLOC_TIME_LATE:
+ alloc_time = "Late";
+ break;
+ case H5D_ALLOC_TIME_INCR:
+ alloc_time = "Incremental";
+ break;
+ default:
+ alloc_time = "Unknown";
+ }
+
+ if (MAINPROCESS)
+ HDprintf("== Running tests with filter '%s' using '%s' and '%s' allocation time ==\n\n",
+ filterNames[cur_filter_idx],
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "Linked-Chunk I/O" : "Multi-Chunk I/O",
+ alloc_time);
+
+ /* Make sure current filter is available before testing with it */
+ filter_avail = H5Zfilter_avail(cur_filter);
+ VRFY((filter_avail >= 0), "H5Zfilter_avail succeeded");
+
+ if (!filter_avail) {
+ if (MAINPROCESS)
+ HDprintf(" ** SKIPPED tests with filter '%s' - filter unavailable **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Get the current filter's info */
+ VRFY((H5Zget_filter_info(cur_filter, &filter_config) >= 0), "H5Zget_filter_info succeeded");
+
+ /* Determine if filter is encode-enabled */
+ if (0 == (filter_config & H5Z_FILTER_CONFIG_ENCODE_ENABLED)) {
+ if (MAINPROCESS)
+ HDprintf(" ** SKIPPED tests with filter '%s' - filter not encode-enabled **\n\n",
+ filterNames[cur_filter_idx]);
+ continue;
+ }
+
+ /* Set space allocation time */
+ VRFY((H5Pset_alloc_time(dcpl_id, space_alloc_time) >= 0), "H5Pset_alloc_time succeeded");
+
+ /* Set chunk I/O optimization method */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, chunk_opt) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ /* Create a group to hold all the datasets for this combination
+ * of filter and chunk optimization mode. Then, close the file
+ * again since some tests may need to open the file in a special
+ * way, like on rank 0 only */
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen succeeded");
+
+ HDsnprintf(group_name, sizeof(group_name), "%s_%s_%s", filterNames[cur_filter_idx],
+ H5FD_MPIO_CHUNK_ONE_IO == chunk_opt ? "linked-chunk-io" : "multi-chunk-io",
+ alloc_time);
+
+ group_id = H5Gcreate2(file_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group_id >= 0), "H5Gcreate2 succeeded");
+
+ VRFY((H5Gclose(group_id) >= 0), "H5Gclose failed");
+ group_id = H5I_INVALID_HID;
+
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+ file_id = H5I_INVALID_HID;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ test_func func = tests[i];
+
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ func(group_name, cur_filter, fapl_id, dcpl_id, dxpl_id);
+ }
+ else {
+ if (MAINPROCESS)
+ MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
+
+ if (MAINPROCESS)
+ HDputs("");
+ }
}
}
+ VRFY((H5Pclose(dcpl_id) >= 0), "DCPL close succeeded");
+ dcpl_id = H5I_INVALID_HID;
+
+ VRFY((H5Pclose(dxpl_id) >= 0), "DXPL close succeeded");
+ dxpl_id = H5I_INVALID_HID;
+
if (nerrors)
goto exit;
@@ -6575,7 +8861,21 @@ exit:
TestAlarmOff();
- h5_clean_files(FILENAME, fapl);
+ h5_clean_files(FILENAME, fapl_id);
+ fapl_id = H5I_INVALID_HID;
+
+ if (dcpl_id >= 0)
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ if (dxpl_id >= 0)
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ if (fapl_id >= 0)
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ if (fcpl_id >= 0)
+ VRFY((H5Pclose(fcpl_id) >= 0), "H5Pclose succeeded");
+ if (group_id >= 0)
+ VRFY((H5Gclose(group_id) >= 0), "H5Gclose succeeded");
+ if (file_id >= 0)
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
H5close();
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
index 7eb34ed..800604c 100644
--- a/testpar/t_filters_parallel.h
+++ b/testpar/t_filters_parallel.h
@@ -30,23 +30,23 @@
#include "stdlib.h"
#include "testpar.h"
+/* Element count of a true array (not a pointer). Fully parenthesized so the
+ * expansion is safe inside larger expressions and casts (CERT PRE01-C/PRE02-C). */
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
/* Used to load other filters than GZIP */
/* #define DYNAMIC_FILTER */ /* Uncomment and define the fields below to use a dynamically loaded filter */
+
+#ifdef DYNAMIC_FILTER
#define FILTER_NUM_CDVALUES 1
const unsigned int cd_values[FILTER_NUM_CDVALUES] = {0};
-H5Z_filter_t filter_id;
-unsigned int flags = 0;
-size_t cd_nelmts = FILTER_NUM_CDVALUES;
-
-/* Utility Macros */
-#define STRINGIFY(type) #type
+unsigned int flags = 0;
+size_t cd_nelmts = FILTER_NUM_CDVALUES;
+#endif
/* Common defines for all tests */
-#define C_DATATYPE long
-#define C_DATATYPE_MPI MPI_LONG
-#define COMPOUND_C_DATATYPE cmpd_filtered_t
-#define C_DATATYPE_STR(type) STRINGIFY(type)
-#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
+#define C_DATATYPE long
+#define C_DATATYPE_MPI MPI_LONG
+#define COMPOUND_C_DATATYPE cmpd_filtered_t
+#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
/* Macro used to generate data for datasets for later verification */
#define GEN_DATA(i) INCREMENTAL_DATA(i)
@@ -59,7 +59,7 @@ size_t cd_nelmts = FILTER_NUM_CDVALUES;
#define RANK_DATA(i) \
(mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
-#define DEFAULT_DEFLATE_LEVEL 6
+#define DEFAULT_DEFLATE_LEVEL 9
#define DIM0_SCALE_FACTOR 4
#define DIM1_SCALE_FACTOR 2
@@ -89,6 +89,14 @@ typedef struct {
#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+/* Defines for the unshared filtered chunks partial write test */
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_NAME "unshared_filtered_chunks_partial_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_PARTIAL_CH_NCOLS (DIM1_SCALE_FACTOR)
+
/* Defines for the shared filtered chunks write test */
#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write"
#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
@@ -97,6 +105,42 @@ typedef struct {
#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
+/* Defines for the unshared filtered chunks w/ single unlim. dimension write test */
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_single_unlim_dim_write"
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NROWS (WRITE_UNSHARED_ONE_UNLIM_DIM_NROWS / mpi_size)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_CH_NCOLS (WRITE_UNSHARED_ONE_UNLIM_DIM_NCOLS / mpi_size)
+#define WRITE_UNSHARED_ONE_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the shared filtered chunks w/ single unlim. dimension write test */
+#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_single_unlim_dim_write"
+#define WRITE_SHARED_ONE_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS (mpi_size)
+#define WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NROWS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NCOLS (WRITE_SHARED_ONE_UNLIM_DIM_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_ONE_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the unshared filtered chunks w/ two unlim. dimension write test */
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_NAME "unshared_filtered_chunks_two_unlim_dim_write"
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_TWO_UNLIM_DIM_NLOOPS 5
+
+/* Defines for the shared filtered chunks w/ two unlim. dimension write test */
+#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_NAME "shared_filtered_chunks_two_unlim_dim_write"
+#define WRITE_SHARED_TWO_UNLIM_DIM_DATASET_DIMS 2
+#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NROWS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NROWS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NCOLS (mpi_size)
+#define WRITE_SHARED_TWO_UNLIM_DIM_NLOOPS 5
+
/* Defines for the filtered chunks write test where a process has no selection */
#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
@@ -403,4 +447,53 @@ typedef struct {
#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+/* Defines for the unshared filtered edge chunks write test */
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "unshared_filtered_edge_chunks_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "unshared_filtered_edge_chunks_no_filter_write"
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+ (mpi_size * DIM1_SCALE_FACTOR) + (WRITE_UNSHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1)
+
+/* Defines for the shared filtered edge chunks write test */
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME "shared_filtered_edge_chunks_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_NAME2 "shared_filtered_edge_chunks_no_filter_write"
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_DATASET_DIMS 2
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NROWS \
+ (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_EDGE_CHUNKS_NCOLS \
+ ((WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR) + \
+ (WRITE_SHARED_FILTERED_EDGE_CHUNKS_CH_NCOLS - 1))
+
+/* Defines for the fill values test */
+#define FILL_VALUES_TEST_DATASET_NAME "fill_value_test"
+#define FILL_VALUES_TEST_DATASET_NAME2 "fill_value_alloc_test"
+#define FILL_VALUES_TEST_DATASET_DIMS 2
+#define FILL_VALUES_TEST_FILL_VAL (-1)
+#define FILL_VALUES_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUES_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUES_TEST_NROWS (FILL_VALUES_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUES_TEST_NCOLS (FILL_VALUES_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the undefined fill value test */
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_NAME "fill_value_undefined_test"
+#define FILL_VALUE_UNDEFINED_TEST_DATASET_DIMS 2
+#define FILL_VALUE_UNDEFINED_TEST_CH_NROWS (mpi_size)
+#define FILL_VALUE_UNDEFINED_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_VALUE_UNDEFINED_TEST_NROWS (FILL_VALUE_UNDEFINED_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_VALUE_UNDEFINED_TEST_NCOLS (FILL_VALUE_UNDEFINED_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the fill time of 'never' test */
+#define FILL_TIME_NEVER_TEST_DATASET_NAME "fill_time_never_test"
+#define FILL_TIME_NEVER_TEST_DATASET_DIMS 2
+#define FILL_TIME_NEVER_TEST_FILL_VAL (-1)
+#define FILL_TIME_NEVER_TEST_CH_NROWS (mpi_size)
+#define FILL_TIME_NEVER_TEST_CH_NCOLS (mpi_size + 1)
+#define FILL_TIME_NEVER_TEST_NROWS (FILL_TIME_NEVER_TEST_CH_NROWS * DIM0_SCALE_FACTOR)
+#define FILL_TIME_NEVER_TEST_NCOLS (FILL_TIME_NEVER_TEST_CH_NCOLS * DIM1_SCALE_FACTOR)
+
#endif /* TEST_PARALLEL_FILTERS_H_ */
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index cc569f6..0500a2d 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -43,7 +43,7 @@ static int data_g[100][100];
*-------------------------------------------------------------------------
*/
static hid_t
-create_test_file(char *name, hid_t fapl_id)
+create_test_file(char *name, size_t name_length, hid_t fapl_id)
{
hid_t fid = H5I_INVALID_HID;
hid_t dcpl_id = H5I_INVALID_HID;
@@ -86,7 +86,7 @@ create_test_file(char *name, hid_t fapl_id)
if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsprintf(name, "grp%02u", (unsigned)i);
+ HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -153,7 +153,7 @@ main(int argc, char *argv[])
/* Create the file */
h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
- if ((fid1 = create_test_file(name, fapl_id)) < 0)
+ if ((fid1 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
/* Flush and exit without closing the library */
if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
@@ -161,7 +161,7 @@ main(int argc, char *argv[])
/* Create the other file which will not be flushed */
h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
- if ((fid2 = create_test_file(name, fapl_id)) < 0)
+ if ((fid2 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
if (mpi_rank == 0)
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index e782f8a..8cf40d0 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -43,7 +43,7 @@ static int data_g[100][100];
*-------------------------------------------------------------------------
*/
static herr_t
-check_test_file(char *name, hid_t fapl_id)
+check_test_file(char *name, size_t name_length, hid_t fapl_id)
{
hid_t fid = H5I_INVALID_HID;
hid_t sid = H5I_INVALID_HID;
@@ -89,7 +89,7 @@ check_test_file(char *name, hid_t fapl_id)
if ((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
goto error;
for (i = 0; i < N_GROUPS; i++) {
- HDsprintf(name, "grp%02u", (unsigned)i);
+ HDsnprintf(name, name_length, "grp%02u", (unsigned)i);
if ((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
goto error;
if (H5Gclose(gid) < 0)
@@ -182,7 +182,7 @@ main(int argc, char *argv[])
/* Check the case where the file was flushed */
h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name));
- if (check_test_file(name, fapl_id1)) {
+ if (check_test_file(name, sizeof(name), fapl_id1)) {
H5_FAILED()
goto error;
}
@@ -199,7 +199,7 @@ main(int argc, char *argv[])
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
- if (check_test_file(name, fapl_id2)) {
+ if (check_test_file(name, sizeof(name), fapl_id2)) {
if (mpi_rank == 0)
PASSED();
}
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 5979c5d..606e100 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -556,7 +556,7 @@ external_links(void)
/* test opening a group that is to an external link, the external linked
file should inherit the source file's access properties */
- HDsprintf(link_path, "%s%s%s", group_path, "/", link_name);
+ HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
group = H5Gopen2(fid, link_path, H5P_DEFAULT);
VRFY((group >= 0), "H5Gopen succeeded");
ret = H5Gclose(group);
diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c
new file mode 100644
index 0000000..2072afe
--- /dev/null
+++ b/testpar/t_vfd.c
@@ -0,0 +1,4055 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ *
+ * This file is a catchall for parallel VFD tests.
+ */
+
+#include "testphdf5.h"
+
+/* Must be a power of 2. Reducing it below 1024 may cause problems */
+#define INTS_PER_RANK 1024
+
+/* global variable declarations: */
+
+hbool_t pass = TRUE; /* set to FALSE on error */
+const char *failure_mssg = NULL;
+
+const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/
+ "mpio_vfd_test_file_1", /*1*/
+ "mpio_vfd_test_file_2", /*2*/
+ "mpio_vfd_test_file_3", /*3*/
+ "mpio_vfd_test_file_4", /*4*/
+ "mpio_vfd_test_file_5", /*5*/
+ NULL};
+
+/* File Test Images
+ *
+ * Pointers to dynamically allocated buffers of size
+ * INTS_PER_RANK * sizeof(int32_t) * mpi_size(). These
+ * buffers are used to put the test file in a known
+ * state, and to test if the test file contains the
+ * expected data.
+ */
+
+int32_t *increasing_fi_buf = NULL;
+int32_t *decreasing_fi_buf = NULL;
+int32_t *negative_fi_buf = NULL;
+int32_t *zero_fi_buf = NULL;
+int32_t *read_fi_buf = NULL;
+
+/* local utility function declarations */
+
+static unsigned alloc_and_init_file_images(int mpi_size);
+static void free_file_images(void);
+static void setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa,
+ H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr);
+static void takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr,
+ hid_t *dxpl_id_ptr);
+
+/* test functions */
+static unsigned vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+
+static unsigned vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name);
+
+/****************************************************************************/
+/***************************** Utility Functions ****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: alloc_and_init_file_images
+ *
+ * Purpose: Allocate and initialize the global buffers used to construct,
+ * load and verify test file contents.
+ *
+ * Return:      0 on success, 1 on failure (function returns unsigned)
+ *
+ * Programmer: John Mainzer
+ * 3/25/26
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+alloc_and_init_file_images(int mpi_size)
+{
+    const char *fcn_name = "alloc_and_init_file_images()";
+    int cp = 0;                    /* checkpoint counter for progress tracing */
+    int buf_len;                   /* element count of each image buffer */
+    size_t buf_size;               /* byte size of each image buffer */
+    int i;
+    hbool_t show_progress = FALSE; /* set to TRUE to print checkpoints */
+
+    /* File-wide error protocol: start optimistic; every later step is
+     * guarded by "if (pass)" so the first failure short-circuits the rest.
+     */
+    pass = TRUE;
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* allocate the file image buffers */
+    if (pass) {
+
+        /* Each buffer holds INTS_PER_RANK int32_t values per MPI rank */
+        buf_len = INTS_PER_RANK * mpi_size;
+        buf_size = sizeof(int32_t) * (size_t)INTS_PER_RANK * (size_t)mpi_size;
+
+        increasing_fi_buf = (int32_t *)HDmalloc(buf_size);
+        decreasing_fi_buf = (int32_t *)HDmalloc(buf_size);
+        negative_fi_buf = (int32_t *)HDmalloc(buf_size);
+        zero_fi_buf = (int32_t *)HDmalloc(buf_size);
+        read_fi_buf = (int32_t *)HDmalloc(buf_size);
+
+        if ((!increasing_fi_buf) || (!decreasing_fi_buf) || (!negative_fi_buf) || (!zero_fi_buf) ||
+            (!read_fi_buf)) {
+
+            pass = FALSE;
+            failure_mssg = "Can't allocate one or more file image buffers.";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* initialize the file image buffers -- each with a distinct,
+     * recognizable pattern so reads can be attributed to their source
+     */
+    if (pass) {
+
+        for (i = 0; i < buf_len; i++) {
+
+            increasing_fi_buf[i] = i;
+            decreasing_fi_buf[i] = buf_len - i;
+            negative_fi_buf[i] = -i;
+            zero_fi_buf[i] = 0;
+            read_fi_buf[i] = 0;
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* discard file image buffers if there was an error */
+    if (!pass) {
+
+        free_file_images();
+    }
+
+    /* 0 on success, non-zero on failure */
+    return !pass;
+
+} /* alloc_and_init_file_images() */
+
+/*-------------------------------------------------------------------------
+ * Function: free_file_images
+ *
+ * Purpose: Deallocate any glogal file image buffers that exist, and
+ * set their associated pointers to NULL.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/25/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+free_file_images(void)
+{
+ if (increasing_fi_buf) {
+
+ HDfree(increasing_fi_buf);
+ increasing_fi_buf = NULL;
+ }
+
+ if (decreasing_fi_buf) {
+
+ HDfree(decreasing_fi_buf);
+ decreasing_fi_buf = NULL;
+ }
+
+ if (negative_fi_buf) {
+
+ HDfree(negative_fi_buf);
+ negative_fi_buf = NULL;
+ }
+
+ if (zero_fi_buf) {
+
+ HDfree(zero_fi_buf);
+ zero_fi_buf = NULL;
+ }
+
+ if (read_fi_buf) {
+
+ HDfree(read_fi_buf);
+ read_fi_buf = NULL;
+ }
+
+ return;
+
+} /* free_file_images() */
+
+/*-------------------------------------------------------------------------
+ * Function: setup_vfd_test_file
+ *
+ * Purpose: Create / open the specified test file with the specified
+ * VFD, and set the EOA to the specified value.
+ *
+ * Setup the dxpl for subsequent I/O via the target VFD.
+ *
+ * Return a pointer to the instance of H5FD_t created on
+ * file open in *lf_ptr, and the FAPL and DXPL ids in
+ * *fapl_id_ptr and *dxpl_id_ptr. Similarly, copy the
+ * "fixed" file name into file_name on exit.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/25/26
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa,
+ H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr)
+{
+ const char *fcn_name = "setup_vfd_test_file()";
+ char filename[512];
+ int cp = 0;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ unsigned flags = 0; /* file open flags */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+
+ HDassert(vfd_name);
+ HDassert(lf_ptr);
+ HDassert(fapl_id_ptr);
+ HDassert(dxpl_id_ptr);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setupf fapl for target VFD */
+ if (pass) {
+
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "Can't create fapl.";
+ }
+ }
+
+ if (pass) {
+
+ if (strcmp(vfd_name, "mpio") == 0) {
+
+ if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "Can't set mpio fapl.";
+ }
+ }
+ else {
+
+ pass = FALSE;
+ failure_mssg = "un-supported VFD";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if (pass) {
+
+ if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Open the VFD test file with the specified VFD. */
+
+ if (pass) {
+
+ flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC;
+
+ if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDopen() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* set eoa as specified */
+
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDset_eoa() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if (pass) { /* setup dxpl */
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ if (dxpl_id < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if (pass) {
+
+ if (H5Pset_dxpl_mpio(dxpl_id, xfer_mode) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if (pass) {
+
+ if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, coll_opt_mode) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if (pass) { /* setup pointers with return values */
+
+ strncpy(file_name, filename, 512);
+ *lf_ptr = lf;
+ *fapl_id_ptr = fapl_id;
+ *dxpl_id_ptr = dxpl_id;
+ }
+ else { /* tidy up from failure as possible */
+
+ if (lf)
+ H5FDclose(lf);
+
+ if (fapl_id != -1)
+ H5Pclose(fapl_id);
+
+ if (dxpl_id != -1)
+ H5Pclose(dxpl_id);
+ }
+
+ return;
+
+} /* setup_vfd_test_file() */
+
+/*-------------------------------------------------------------------------
+ * Function: takedown_vfd_test_file
+ *
+ * Purpose: Close and delete the specified test file. Close the
+ * FAPL & DXPL.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/25/26
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr)
+{
+ const char *fcn_name = "takedown_vfd_test_file()";
+ int cp = 0;
+ hbool_t show_progress = FALSE;
+
+ HDassert(lf_ptr);
+ HDassert(fapl_id_ptr);
+ HDassert(dxpl_id_ptr);
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Close the test file if it is open, regardless of the value of pass.
+ * This should let the test program shut down more cleanly.
+ */
+
+ if (*lf_ptr) {
+
+ if (H5FDclose(*lf_ptr) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDclose() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) On rank 0, delete the test file.
+ */
+
+ if (pass) {
+
+ /* wait for everyone to close the file */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ((mpi_rank == 0) && (HDremove(filename) < 0)) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+
+ /* wait for the file delete to complete */
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Close the fapl */
+ if (H5Pclose(*fapl_id_ptr) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "can't close fapl.\n";
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Close the dxpl */
+ if (H5Pclose(*dxpl_id_ptr) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "can't close dxpl.\n";
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* takedown_vfd_test_file() */
+
+/****************************************************************************/
+/******************************* Test Functions *****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: vector_read_test_1()
+ *
+ * Purpose: Simple vector read test:
+ *
+ * 1) Open the test file with the specified VFD, set the eoa,
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire increasing_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, zero the read buffer, and then read
+ * INTS_PER_RANK * sizeof(int32) bytes from the file
+ * starting at offset mpi_rank * INTS_PER_RANK *
+ * sizeof(int32_t) in both the file and read_fi_buf.
+ * Do this with a vector read containing a single
+ * element.
+ *
+ * Verify that read_fi_buf contains zeros for all
+ * indices less than mpi_rank * INTS_PER_RANK, or
+ * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK.
+ * For all other indices, read_fi_buf should equal
+ * increasing_fi_buf.
+ *
+ * 5) Barrier
+ *
+ * 6) Close the test file.
+ *
+ * 7) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_read_test_1()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ uint32_t count;
+ H5FD_mem_t types[1];
+ haddr_t addrs[1];
+ size_t sizes[1];
+ void * bufs[1];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector read test 1 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector read test 1 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector read test 1 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire increasing_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
+ 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) On each rank, zero the read buffer, and then read
+ * INTS_PER_RANK * sizeof(int32) bytes from the file
+ * starting at offset mpi_rank * INTS_PER_RANK *
+ * sizeof(int32_t) in both the file and read_fi_buf.
+ * Do this with a vector read containing a single
+ * element.
+ *
+ * Verify that read_fi_buf contains zeros for all
+ * indices less than mpi_rank * INTS_PER_RANK, or
+ * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK.
+ * For all other indices, read_fi_buf should equal
+ * increasing_fi_buf.
+ */
+ if (pass) {
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ read_fi_buf[i] = 0;
+ }
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed.\n";
+ }
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) {
+
+ if (read_fi_buf[i] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in read_fi_buf (1).\n";
+ break;
+ }
+ }
+ else {
+
+ if (read_fi_buf[i] != increasing_fi_buf[i]) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in read_fi_buf (2).\n";
+ break;
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_read_test_1() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_read_test_2()
+ *
+ * Purpose: Simple vector read test with only half of ranks
+ * participating in each vector read.
+ *
+ * 1) Open the test file with the specified VFD, set the eoa,
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire decreasing_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, zero the read buffer.
+ *
+ * 5) On even ranks, read INTS_PER_RANK * sizeof(int32)
+ * bytes from the file starting at offset mpi_rank *
+ * INTS_PER_RANK * sizeof(int32_t) in both the file and
+ * read_fi_buf. Do this with a vector read containing
+ * a single element.
+ *
+ * Odd ranks perform an empty read.
+ *
+ * 6) Barrier.
+ *
+ * 7) On odd ranks, read INTS_PER_RANK * sizeof(int32)
+ * bytes from the file starting at offset mpi_rank *
+ * INTS_PER_RANK * sizeof(int32_t) in both the file and
+ * read_fi_buf. Do this with a vector read containing
+ * a single element.
+ *
+ * Even ranks perform an empty read.
+ *
+ * 8) Verify that read_fi_buf contains zeros for all
+ * indices less than mpi_rank * INTS_PER_RANK, or
+ * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK.
+ * For all other indices, read_fi_buf should equal
+ * decreasing_fi_buf.
+ *
+ * 9) Barrier
+ *
+ * 10) Close the test file.
+ *
+ * 11) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_read_test_2()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ uint32_t count;
+ H5FD_mem_t types[1];
+ haddr_t addrs[1];
+ size_t sizes[1];
+ void * bufs[1];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector read test 2 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector read test 2 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector read test 2 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire decreasing_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)decreasing_fi_buf) <
+ 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) On each rank, zero the read buffer. */
+ if (pass) {
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ read_fi_buf[i] = 0;
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) On even ranks, read INTS_PER_RANK * sizeof(int32)
+ * bytes from the file starting at offset mpi_rank *
+ * INTS_PER_RANK * sizeof(int32_t) in both the file and
+ * read_fi_buf. Do this with a vector read containing
+ * a single element.
+ *
+ * Odd ranks perform an empty read.
+ */
+ if (pass) {
+
+ if (mpi_rank % 2 == 0) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK]));
+ }
+ else {
+
+ count = 0;
+ }
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) On odd ranks, read INTS_PER_RANK * sizeof(int32)
+ * bytes from the file starting at offset mpi_rank *
+ * INTS_PER_RANK * sizeof(int32_t) in both the file and
+ * read_fi_buf. Do this with a vector read containing
+ * a single element.
+ *
+ * Even ranks perform an empty read.
+ */
+ if (pass) {
+
+ if (mpi_rank % 2 == 1) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK]));
+ }
+ else {
+
+ count = 0;
+ }
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Verify that read_fi_buf contains zeros for all
+ * indices less than mpi_rank * INTS_PER_RANK, or
+ * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK.
+ * For all other indices, read_fi_buf should equal
+ * decreasing_fi_buf.
+ */
+
+ if (pass) {
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) {
+
+ if (read_fi_buf[i] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in read_fi_buf (1).\n";
+ break;
+ }
+ }
+ else {
+
+ if (read_fi_buf[i] != decreasing_fi_buf[i]) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in read_fi_buf (2).\n";
+ break;
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 9) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 10) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_read_test_2() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_read_test_3()
+ *
+ * Purpose: Verify that vector read works with multiple entries in
+ * the vector in each read, and that read buffers need not
+ * be in increasing (memory) address order.
+ *
+ * 1) Open the test file with the specified VFD, set the eoa,
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire negative_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, zero the four read buffers.
+ *
+ * 5) On each rank, do a vector read from the file, with
+ * each rank's vector having four elements, with each
+ * element reading INTS_PER_RANK / 4 * sizeof(int32)
+ * bytes, and the reads starting at address:
+ *
+ * (mpi_rank * INTS_PER_RANK) * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) *
+ * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) *
+ * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 2) *
+ * sizeof(int32_t)
+ *
+ * On even ranks, the targets of the reads should be
+ * buf_0, buf_1, buf_2, and buf_3 respectively.
+ *
+ * On odd ranks, the targets of the reads should be
+ * buf_3, buf_2, buf_1, and buf_0 respectively.
+ *
+ * This has the effect of ensuring that on at least
+ * some ranks, the read buffers are not in increasing
+ * address order.
+ *
+ * 6) Verify that buf_0, buf_1, buf_2, and buf_3 contain
+ * the expected data. Note that this will be different
+ * on even vs. odd ranks.
+ *
+ * 7) Barrier.
+ *
+ * 8) Close the test file.
+ *
+ * 9) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_read_test_3()";
+ char test_title[120];
+ char filename[512];
+ int32_t buf_0[(INTS_PER_RANK / 4) + 1];
+ int32_t buf_1[(INTS_PER_RANK / 4) + 1];
+ int32_t buf_2[(INTS_PER_RANK / 4) + 1];
+ int32_t buf_3[(INTS_PER_RANK / 4) + 1];
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ uint32_t count;
+ H5FD_mem_t types[4];
+ haddr_t addrs[4];
+ size_t sizes[4];
+ void * bufs[4];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector read test 3 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector read test 3 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector read test 3 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire negative_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)negative_fi_buf) <
+ 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) On each rank, zero the four read buffers. */
+ if (pass) {
+
+ for (i = 0; i <= INTS_PER_RANK / 4; i++) {
+
+ buf_0[i] = 0;
+ buf_1[i] = 0;
+ buf_2[i] = 0;
+ buf_3[i] = 0;
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) On each rank, do a vector read from the file, with
+ * each rank's vector having four elements, with each
+ * element reading INTS_PER_RANK / 4 * sizeof(int32)
+ * bytes, and the reads starting at address:
+ *
+ * (mpi_rank * INTS_PER_RANK) * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) *
+ * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) *
+ * sizeof(int32_t)
+ *
+ * (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 2) *
+ * sizeof(int32_t)
+ *
+ * On even ranks, the targets of the reads should be
+ * buf_0, buf_1, buf_2, and buf_3 respectively.
+ *
+ * On odd ranks, the targets of the reads should be
+ * buf_3, buf_2, buf_1, and buf_0 respectively.
+ *
+ * This has the effect of ensuring that on at least
+ * some ranks, the read buffers are not in increasing
+ * address order.
+ */
+ if (pass) {
+
+ haddr_t base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ count = 4;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr;
+ sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = base_addr + ((haddr_t)(INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t)));
+ sizes[1] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+ types[2] = H5FD_MEM_DRAW;
+ addrs[2] = base_addr + ((haddr_t)(INTS_PER_RANK / 2) * (haddr_t)(sizeof(int32_t)));
+ sizes[2] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+
+ types[3] = H5FD_MEM_DRAW;
+ addrs[3] = base_addr + ((haddr_t)(3 * INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t)));
+ sizes[3] = (size_t)INTS_PER_RANK / 4 * sizeof(int32_t);
+
+ if (mpi_rank % 2 == 0) {
+
+ bufs[0] = (void *)(&(buf_0[0]));
+ bufs[1] = (void *)(buf_1);
+ bufs[2] = (void *)(buf_2);
+ bufs[3] = (void *)(buf_3);
+ }
+ else {
+
+ bufs[0] = (void *)(&(buf_3[0]));
+ bufs[1] = (void *)(buf_2);
+ bufs[2] = (void *)(buf_1);
+ bufs[3] = (void *)(buf_0);
+ }
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) Verify that buf_0, buf_1, buf_2, and buf_3 contain
+ * the expected data. Note that this will be different
+ * on even vs. odd ranks.
+ */
+ if (pass) {
+
+ int base_index = mpi_rank * INTS_PER_RANK;
+
+ for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+ if (((mpi_rank % 2 == 0) && (buf_0[i] != negative_fi_buf[base_index + i])) ||
+ ((mpi_rank % 2 == 1) && (buf_3[i] != negative_fi_buf[base_index + i]))) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in buf (1).\n";
+ }
+ }
+
+ base_index += INTS_PER_RANK / 4;
+
+ for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+ if (((mpi_rank % 2 == 0) && (buf_1[i] != negative_fi_buf[base_index + i])) ||
+ ((mpi_rank % 2 == 1) && (buf_2[i] != negative_fi_buf[base_index + i]))) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in buf (2).\n";
+ }
+ }
+
+ base_index += INTS_PER_RANK / 4;
+
+ for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+ if (((mpi_rank % 2 == 0) && (buf_2[i] != negative_fi_buf[base_index + i])) ||
+ ((mpi_rank % 2 == 1) && (buf_1[i] != negative_fi_buf[base_index + i]))) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in buf (3).\n";
+ }
+ }
+
+ base_index += INTS_PER_RANK / 4;
+
+ for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) {
+
+ if (((mpi_rank % 2 == 0) && (buf_3[i] != negative_fi_buf[base_index + i])) ||
+ ((mpi_rank % 2 == 1) && (buf_0[i] != negative_fi_buf[base_index + i]))) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value in buf (4).\n";
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_read_test_3() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_read_test_4()
+ *
+ * Purpose: Test vector I/O reads with vectors of different lengths
+ * and entry sizes across the ranks. Vectors are not, in
+ * general, sorted in increasing address order. Further,
+ * reads are not, in general, contiguous.
+ *
+ * 1) Open the test file with the specified VFD, set the eoa.
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire increasing_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) Set all cells of read_fi_buf to zero.
+ *
+ * 5) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector read between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ * as follows:
+ *
+ * if ( rank % 4 == 0 ) construct a vector that reads:
+ *
+ * INTS_PER_RANK / 4 * sizeof(int32_t) bytes
+ * starting at base_addr + INTS_PER_RANK / 2 *
+ * sizeof(int32_t),
+ *
+ * INTS_PER_RANK / 8 * sizeof(int32_t) bytes
+ * starting at base_addr + INTS_PER_RANK / 4 *
+ * sizeof(int32_t), and
+ *
+ * INTS_PER_RANK / 16 * sizeof(int32_t) butes
+ * starting at base_addr + INTS_PER_RANK / 16 *
+ * sizeof(int32_t)
+ *
+ * to the equivalent locations in read_fi_buf
+ *
+ * if ( rank % 4 == 1 ) construct a vector that reads:
+ *
+ * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t)
+ * bytes starting at base_addr + sizeof(int32_t), and
+ *
+ * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes
+ * starting at base_addr + (INTS_PER_RANK / 2 + 1) *
+ * sizeof(int32_t).
+ *
+ * to the equivalent locations in read_fi_buf
+ *
+ * if ( rank % 4 == 2 ) construct a vector that reads:
+ *
+ * sizeof(int32_t) bytes starting at base_index +
+ * (INTS_PER_RANK / 2) * sizeof int32_t.
+ *
+ * to the equivalent locations in read_fi_buf
+ *
+ * if ( rank % 4 == 3 ) construct and read the empty vector
+ *
+ * 6) On each rank, verify that read_fi_buf contains the
+ * the expected values -- that is the matching values from
+ * increasing_fi_buf where ever there was a read, and zero
+ * otherwise.
+ *
+ * 7) Barrier.
+ *
+ * 8) Close the test file.
+ *
+ * 9) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_read_test_4()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ haddr_t base_addr;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ int k;
+ int base_index;
+ uint32_t count = 0;
+ H5FD_mem_t types[4];
+ haddr_t addrs[4];
+ size_t sizes[4];
+ void * bufs[4];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector read test 4 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector read test 4 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector read test 4 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire negative_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
+ 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) Set all cells of read_fi_buf to zero. */
+ if (pass) {
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ read_fi_buf[i] = 0;
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector read between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ * as follows:
+ */
+ if (pass) {
+
+ base_index = mpi_rank * INTS_PER_RANK;
+ base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t);
+
+ if ((mpi_rank % 4) == 0) {
+
+ /* if ( rank % 4 == 0 ) construct a vector that reads:
+ *
+ * INTS_PER_RANK / 4 * sizeof(int32_t) bytes
+ * starting at base_addr + INTS_PER_RANK / 2 *
+ * sizeof(int32_t),
+ *
+ * INTS_PER_RANK / 8 * sizeof(int32_t) bytes
+ * starting at base_addr + INTS_PER_RANK / 4 *
+ * sizeof(int32_t), and
+ *
+ * INTS_PER_RANK / 16 * sizeof(int32_t) butes
+ * starting at base_addr + INTS_PER_RANK / 16 *
+ * sizeof(int32_t)
+ *
+ * to the equivalent locations in read_fi_buf
+ */
+
+ count = 3;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+ sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) * sizeof(int32_t));
+ sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t);
+ bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 4)]));
+
+ types[2] = H5FD_MEM_DRAW;
+ addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t));
+ sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+ bufs[2] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 16)]));
+ }
+ else if ((mpi_rank % 4) == 1) {
+
+ /* if ( rank % 4 == 1 ) construct a vector that reads:
+ *
+ * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t)
+ * bytes starting at base_addr + sizeof(int32_t), and
+ *
+ * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes
+ * starting at base_addr + (INTS_PER_RANK / 2 + 1) *
+ * sizeof(int32_t).
+ *
+ * to the equivalent locations in read_fi_buf
+ */
+ count = 2;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr + (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[base_index + 1]));
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t));
+ sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+ bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2) + 1]));
+ }
+ else if ((mpi_rank % 4) == 2) {
+
+ /* if ( rank % 4 == 2 ) construct a vector that reads:
+ *
+ * sizeof(int32_t) bytes starting at base_index +
+ * (INTS_PER_RANK / 2) * sizeof int32_t.
+ *
+ * to the equivalent locations in read_fi_buf
+ */
+ count = 1;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+ sizes[0] = sizeof(int32_t);
+ bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+ }
+ else if ((mpi_rank % 4) == 3) {
+
+ /* if ( rank % 4 == 3 ) construct and read the empty vector */
+
+ count = 0;
+ }
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed (1).\n";
+ }
+ }
+
+ /* 6) On each rank, verify that read_fi_buf contains the
+ * the expected values -- that is the matching values from
+ * increasing_fi_buf where ever there was a read, and zero
+ * otherwise.
+ */
+ if (pass) {
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+#if 1
+ for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+ k = j - base_index;
+#else
+ for (k = 0; k < INTS_PER_RANK; k++) {
+
+ j = k + base_index;
+#endif
+
+ if (i == mpi_rank) {
+
+ switch (i % 4) {
+
+ case 0:
+ if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1.1)";
+ HDfprintf(stdout, "\nread_fi_buf[%d] = %d, increasing_fi_buf[%d] = %d\n",
+ j, read_fi_buf[j], j, increasing_fi_buf[j]);
+ }
+ }
+ else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1.2)";
+ }
+ }
+ else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1.3)";
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1.4)";
+ }
+ }
+ break;
+
+ case 1:
+ if ((1 <= k) && (k <= ((INTS_PER_RANK / 2) - 2))) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2.1)";
+ }
+ }
+ else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2.2)";
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2.3)";
+ }
+ }
+ break;
+
+ case 2:
+ if (k == INTS_PER_RANK / 2) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (3.1)";
+ }
+ }
+ else {
+
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (3.2)";
+ }
+ }
+ break;
+
+ case 3:
+ if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (4)";
+ }
+ break;
+
+ default:
+ HDassert(FALSE); /* should be un-reachable */
+ break;
+ }
+ }
+ else if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (5)";
+ }
+ } /* end for loop */
+ } /* end for loop */
+ } /* end if */
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_read_test_4() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_read_test_5()
+ *
+ * Purpose: Test correct management of the sizes[] array optimization,
+ * where, if sizes[i] == 0, we use sizes[i - 1] as the value
+ * of size[j], for j >= i.
+ *
+ * 1) Open the test file with the specified VFD, set the eoa.
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire increasing_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) Set all cells of read_fi_buf to zero.
+ *
+ * 5) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector read between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ *              that reads every 16th integer located in that
+ *              range starting at base_addr. Use a sizes[]
+ * array of length 2, with sizes[0] set to sizeof(int32_t),
+ * and sizes[1] = 0.
+ *
+ * Read the integers into the corresponding locations in
+ * read_fi_buf.
+ *
+ *              6) On each rank, verify that read_fi_buf contains the
+ *                 expected values -- that is, the matching values from
+ *                 increasing_fi_buf wherever there was a read, and zero
+ *                 otherwise.
+ *
+ * 7) Barrier.
+ *
+ * 8) Close the test file.
+ *
+ * 9) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_read_test_5()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ haddr_t base_addr;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ int base_index;
+ uint32_t count = 0;
+ H5FD_mem_t types[(INTS_PER_RANK / 16) + 1];
+ haddr_t addrs[(INTS_PER_RANK / 16) + 1];
+ size_t sizes[2];
+ void * bufs[(INTS_PER_RANK / 16) + 1];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector read test 5 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector read test 5 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector read test 5 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire negative_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) <
+ 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) Set all cells of read_fi_buf to zero. */
+ if (pass) {
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ read_fi_buf[i] = 0;
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector read between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ * that reads every 16th integer located in that
+ * that range starting at base_addr. Use a sizes[]
+ * array of length 2, with sizes[0] set to sizeof(int32_t),
+ * and sizes[1] = 0.
+ *
+ * Read the integers into the corresponding locations in
+ * read_fi_buf.
+ */
+ if (pass) {
+
+ base_index = (mpi_rank * INTS_PER_RANK);
+ base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t);
+
+ count = INTS_PER_RANK / 16;
+ sizes[0] = sizeof(int32_t);
+ sizes[1] = 0;
+
+ for (i = 0; i < INTS_PER_RANK / 16; i++) {
+
+ types[i] = H5FD_MEM_DRAW;
+ addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t));
+ bufs[i] = (void *)(&(read_fi_buf[base_index + (i * 16)]));
+ }
+
+ if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread_vector() failed (1).\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) On each rank, verify that read_fi_buf contains the
+ * the expected values -- that is the matching values from
+ * increasing_fi_buf where ever there was a read, and zero
+ * otherwise.
+ */
+ if (pass) {
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+
+ for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+ if ((i == mpi_rank) && (j % 16 == 0)) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1)";
+ }
+ }
+ else if (read_fi_buf[j] != 0) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2)";
+ }
+ } /* end for loop */
+ } /* end for loop */
+ } /* end if */
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 7) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_read_test_5() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_1()
+ *
+ * Purpose: Simple vector write test:
+ *
+ * 1) Open the test file with the specified VFD, set the eoa,
+ * and setup the DXPL.
+ *
+ * 2) Write the entire increasing_fi_buf to the file, with
+ * exactly one buffer per vector per rank. Use either
+ * independent or collective I/O as specified.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf. Report failure
+ * if any differences are detected.
+ *
+ * 5) Close the test file.
+ *
+ * 6) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_1()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ uint32_t count;
+ H5FD_mem_t types[1];
+ haddr_t addrs[1];
+ size_t sizes[1];
+ void * bufs[1];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector write test 1 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector write test 1 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Write the entire increasing_fi_buf to the file, with
+ * exactly one buffer per vector per rank. Use either
+ * independent or collective I/O as specified.
+ */
+
+ if (pass) {
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed.\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Barrier
+ */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf. Report failure
+ * if any differences are detected.
+ */
+
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; i < mpi_size * INTS_PER_RANK; i++) {
+
+ if (read_fi_buf[i] != increasing_fi_buf[i]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file";
+ break;
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_1() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_2()
+ *
+ * Purpose: Test vector I/O writes in which only some ranks participate.
+ * Depending on the collective parameter, these writes will
+ * be either collective or independent.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Write the odd blocks of the increasing_fi_buf to the file,
+ * with the odd ranks writing the odd blocks, and the even
+ * ranks writing an empty vector.
+ *
+ * Here, a "block" of the increasing_fi_buf is a sequence
+ * of integers in increasing_fi_buf of length INTS_PER_RANK,
+ * and with start index a multiple of INTS_PER_RANK.
+ *
+ * 3) Write the even blocks of the negative_fi_buf to the file,
+ * with the even ranks writing the even blocks, and the odd
+ * ranks writing an empty vector.
+ *
+ *              4) Barrier
+ *
+ *              5) On each rank, read the entire file into the read_fi_buf,
+ *                 and compare against increasing_fi_buf and negative_fi_buf
+ *                 as appropriate.  Report failure if any differences are
+ *                 detected.
+ *
+ *              6) Close the test file.  On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/28/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_2()";
+ char test_title[120];
+ char filename[512];
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ uint32_t count;
+ H5FD_mem_t types[1];
+ haddr_t addrs[1];
+ size_t sizes[1];
+ void * bufs[1];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector write test 2 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector write test 2 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Write the odd blocks of the increasing_fi_buf to the file,
+ * with the odd ranks writing the odd blocks, and the even
+ * ranks writing an empty vector.
+ *
+ * Here, a "block" of the increasing_fi_buf is a sequence
+ * of integers in increasing_fi_buf of length INTS_PER_RANK,
+ * and with start index a multiple of INTS_PER_RANK.
+ */
+ if (pass) {
+
+ if (mpi_rank % 2 == 1) { /* odd ranks */
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (1).\n";
+ }
+ }
+ else { /* even ranks */
+
+ if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (2).\n";
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Write the even blocks of the negative_fi_buf to the file,
+ * with the even ranks writing the even blocks, and the odd
+ * ranks writing an empty vector.
+ */
+ if (pass) {
+
+ if (mpi_rank % 2 == 1) { /* odd ranks */
+
+ if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (3).\n";
+ }
+ }
+ else { /* even ranks */
+
+ count = 1;
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+ bufs[0] = (void *)(&(negative_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (4).\n";
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) Barrier
+ */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf. Report failure
+ * if any differences are detected.
+ */
+
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ if (i % 2 == 1) { /* odd block */
+
+ for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file";
+ break;
+ }
+ }
+ }
+ else { /* even block */
+
+ for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) {
+
+ if (read_fi_buf[j] != negative_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file";
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 6) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_2() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_3()
+ *
+ * Purpose: Test vector I/O writes with vectors of multiple entries.
+ * For now, keep the vectors sorted in increasing address
+ * order.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) For each rank, construct a vector with base address
+ * (mpi_rank * INTS_PER_RANK) and writing all bytes from
+ * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1.
+ * Draw equal parts from increasing_fi_buf,
+ * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf.
+ *
+ * Write to file.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf,
+ * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 5) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/31/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+ H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+ const char *fcn_name = "vector_write_test_3()";
+ char test_title[120];
+ char filename[512];
+ haddr_t base_addr;
+ int base_index;
+ int ints_per_write;
+ size_t bytes_per_write;
+ haddr_t eoa;
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1; /* file access property list ID */
+ hid_t dxpl_id = -1; /* data access property list ID */
+ H5FD_t * lf = NULL; /* VFD struct ptr */
+ int cp = 0;
+ int i;
+ int j;
+ uint32_t count;
+ H5FD_mem_t types[4];
+ haddr_t addrs[4];
+ size_t sizes[4];
+ void * bufs[4];
+
+ pass = TRUE;
+
+ if (mpi_rank == 0) {
+
+ if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+ sprintf(test_title, "parallel vector write test 3 -- %s / independent", vfd_name);
+ }
+ else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+ sprintf(test_title, "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name);
+ }
+ else {
+
+ HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+ sprintf(test_title, "parallel vector write test 3 -- %s / col op / col I/O", vfd_name);
+ }
+
+ TESTING(test_title);
+ }
+
+ show_progress = ((show_progress) && (mpi_rank == 0));
+
+ if (show_progress)
+ HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) For each rank, construct a vector with base address
+ * (mpi_rank * INTS_PER_RANK) and writing all bytes from
+ * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1.
+ * Draw equal parts from increasing_fi_buf,
+ * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf.
+ *
+ * Write to file.
+ */
+ if (pass) {
+
+ count = 4;
+
+ base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+ ints_per_write = INTS_PER_RANK / 4;
+ bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t);
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr;
+ sizes[0] = bytes_per_write;
+ bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = addrs[0] + (haddr_t)(bytes_per_write);
+ sizes[1] = bytes_per_write;
+ bufs[1] = (void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)]));
+
+ types[2] = H5FD_MEM_DRAW;
+ addrs[2] = addrs[1] + (haddr_t)(bytes_per_write);
+ sizes[2] = bytes_per_write;
+ bufs[2] = (void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)]));
+
+ types[3] = H5FD_MEM_DRAW;
+ addrs[3] = addrs[2] + (haddr_t)(bytes_per_write);
+ sizes[3] = bytes_per_write;
+ bufs[3] = (void *)(&(zero_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
+
+#if 0 /* JRM */
+ HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n",
+ (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]);
+ HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n",
+ (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]);
+ HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n",
+ (unsigned long long)bufs[0], (unsigned long long)bufs[1],
+ (unsigned long long)bufs[2], (unsigned long long)bufs[3]);
+#endif /* JRM */
+
+ if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite_vector() failed (1).\n";
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 3) Barrier
+ */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf,
+ * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ */
+
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDread() failed.\n";
+ }
+
+ for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+ base_index = i * INTS_PER_RANK;
+
+ for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+ if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (1)";
+ break;
+ }
+ }
+
+ base_index += (INTS_PER_RANK / 4);
+
+ for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+ if (read_fi_buf[j] != decreasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (2)";
+ break;
+ }
+ }
+
+ base_index += (INTS_PER_RANK / 4);
+
+ for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+ if (read_fi_buf[j] != negative_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (3)";
+ break;
+ }
+ }
+
+ base_index += (INTS_PER_RANK / 4);
+
+ for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+ if (read_fi_buf[j] != zero_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (3)";
+ break;
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_3() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_4()
+ *
+ * Purpose: Test vector I/O writes with vectors of multiple entries.
+ * For now, keep the vectors sorted in increasing address
+ * order.
+ *
+ * This test differs from vector_write_test_3() in the order
+ * in which the file image buffers appear in the vector
+ * write. This guarantees that at least one of these
+ * tests will present buffers with non-increasing addresses
+ * in RAM.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) For each rank, construct a vector with base address
+ * (mpi_rank * INTS_PER_RANK) and writing all bytes from
+ * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1.
+ * Draw equal parts from zero_fi_buf, negative_fi_buf,
+ * decreasing_fi_buf, and increasing_fi_buf.
+ *
+ * Write to file.
+ *
+ * 3) Barrier
+ *
+ * 4) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf, negative_fi_buf,
+ * decreasing_fi_buf, and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 5) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/31/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+    const char *fcn_name = "vector_write_test_4()";
+    char        test_title[120];
+    char        filename[512];
+    haddr_t     base_addr;
+    int         base_index;
+    int         ints_per_write;
+    size_t      bytes_per_write;
+    haddr_t     eoa;
+    hbool_t     show_progress = FALSE;
+    hid_t       fapl_id       = -1;   /* file access property list ID */
+    hid_t       dxpl_id       = -1;   /* data access property list ID */
+    H5FD_t *    lf            = NULL; /* VFD struct ptr */
+    int         cp            = 0;
+    int         i;
+    int         j;
+    uint32_t    count;
+    H5FD_mem_t  types[4];
+    haddr_t     addrs[4];
+    size_t      sizes[4];
+    void *      bufs[4];
+
+    pass = TRUE;
+
+    if (mpi_rank == 0) {
+
+        /* Use HDsnprintf() so a long VFD name cannot overflow test_title[] */
+        if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+            HDsnprintf(test_title, sizeof(test_title), "parallel vector write test 4 -- %s / independent",
+                       vfd_name);
+        }
+        else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+            HDsnprintf(test_title, sizeof(test_title),
+                       "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name);
+        }
+        else {
+
+            HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+            HDsnprintf(test_title, sizeof(test_title),
+                       "parallel vector write test 4 -- %s / col op / col I/O", vfd_name);
+        }
+
+        TESTING(test_title);
+    }
+
+    /* only rank 0 reports progress */
+    show_progress = ((show_progress) && (mpi_rank == 0));
+
+    if (show_progress)
+        HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+                            &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) For each rank, construct a vector with base address
+     *    (mpi_rank * INTS_PER_RANK) and writing all bytes from
+     *    that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1.
+     *    Draw equal parts from zero_fi_buf, negative_fi_buf,
+     *    decreasing_fi_buf, and increasing_fi_buf -- the reverse
+     *    buffer order from vector_write_test_3(), so that at least
+     *    one of the two tests presents buffers with non-increasing
+     *    addresses in RAM.
+     *
+     *    Write to file.
+     */
+    if (pass) {
+
+        count = 4;
+
+        base_addr       = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+        ints_per_write  = INTS_PER_RANK / 4;
+        bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t);
+
+        types[0] = H5FD_MEM_DRAW;
+        addrs[0] = base_addr;
+        sizes[0] = bytes_per_write;
+        bufs[0]  = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+        types[1] = H5FD_MEM_DRAW;
+        addrs[1] = addrs[0] + (haddr_t)(bytes_per_write);
+        sizes[1] = bytes_per_write;
+        bufs[1]  = (void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)]));
+
+        types[2] = H5FD_MEM_DRAW;
+        addrs[2] = addrs[1] + (haddr_t)(bytes_per_write);
+        sizes[2] = bytes_per_write;
+        bufs[2]  = (void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)]));
+
+        types[3] = H5FD_MEM_DRAW;
+        addrs[3] = addrs[2] + (haddr_t)(bytes_per_write);
+        sizes[3] = bytes_per_write;
+        bufs[3]  = (void *)(&(increasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))]));
+
+        if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDwrite_vector() failed (1).\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 3) Barrier
+     */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) On each rank, read the entire file into the read_fi_buf,
+     *    and compare against zero_fi_buf, negative_fi_buf,
+     *    decreasing_fi_buf, and increasing_fi_buf as
+     *    appropriate.  Report failure if any differences are
+     *    detected.
+     */
+
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDread() failed.\n";
+        }
+
+        for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+            base_index = i * INTS_PER_RANK;
+
+            for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+                if (read_fi_buf[j] != zero_fi_buf[j]) {
+
+                    pass         = FALSE;
+                    failure_mssg = "unexpected data read from file (1)";
+                    break;
+                }
+            }
+
+            base_index += (INTS_PER_RANK / 4);
+
+            for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+                if (read_fi_buf[j] != negative_fi_buf[j]) {
+
+                    pass         = FALSE;
+                    failure_mssg = "unexpected data read from file (2)";
+                    break;
+                }
+            }
+
+            base_index += (INTS_PER_RANK / 4);
+
+            for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+                if (read_fi_buf[j] != decreasing_fi_buf[j]) {
+
+                    pass         = FALSE;
+                    failure_mssg = "unexpected data read from file (3)";
+                    break;
+                }
+            }
+
+            base_index += (INTS_PER_RANK / 4);
+
+            for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+                if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                    pass = FALSE;
+                    /* was "(3)" -- duplicated the previous loop's message, making
+                     * failures in the third and fourth quarters indistinguishable
+                     */
+                    failure_mssg = "unexpected data read from file (4)";
+                    break;
+                }
+            }
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) Close the test file and delete it (on rank 0 only).
+     *    Close FAPL and DXPL.
+     */
+
+    if (pass) {
+
+        takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* report results */
+    if (mpi_rank == 0) {
+
+        if (pass) {
+
+            PASSED();
+        }
+        else {
+
+            H5_FAILED();
+
+            if (show_progress) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
+        }
+    }
+
+    return (!pass);
+
+} /* vector_write_test_4() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_5()
+ *
+ * Purpose: Test vector I/O writes with vectors of different lengths
+ * and entry sizes across the ranks. Vectors are not, in
+ * general, sorted in increasing address order. Further,
+ * writes are not, in general, contiguous.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector write between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ * as follows:
+ *
+ * if ( rank % 4 == 0 ) construct a vector that writes:
+ *
+ * negative_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4
+ * entries,
+ *
+ * decreasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8
+ * entries, and
+ *
+ * increasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16
+ * entries
+ *
+ * to the equivalent locations in the file.
+ *
+ * if ( rank % 4 == 1 ) construct a vector that writes:
+ *
+ * increasing_fi_buf starting at base_index + 1 and
+ * running for (INTS_PER_RANK / 2) - 2 entries, and
+ *
+ *              decreasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2)
+ * - 2 entries
+ *
+ * if ( rank % 4 == 2 ) construct a vector that writes:
+ *
+ * negative_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 and running for one entry.
+ *
+ * if ( rank % 4 == 3 ) construct and write the empty vector
+ *
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf, negative_fi_buf,
+ * decreasing_fi_buf, and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/31/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+    const char *fcn_name = "vector_write_test_5()";
+    char        test_title[120];
+    char        filename[512];
+    haddr_t     base_addr;
+    int         base_index;
+    haddr_t     eoa;
+    hbool_t     show_progress = FALSE;
+    hid_t       fapl_id       = -1;   /* file access property list ID */
+    hid_t       dxpl_id       = -1;   /* data access property list ID */
+    H5FD_t *    lf            = NULL; /* VFD struct ptr */
+    int         cp            = 0;
+    int         i;
+    int         j;
+    int         k;
+    uint32_t    count;
+    H5FD_mem_t  types[4];
+    haddr_t     addrs[4];
+    size_t      sizes[4];
+    void *      bufs[4];
+
+    pass = TRUE;
+
+    if (mpi_rank == 0) {
+
+        if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+            sprintf(test_title, "parallel vector write test 5 -- %s / independent", vfd_name);
+        }
+        else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+            sprintf(test_title, "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name);
+        }
+        else {
+
+            HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+            sprintf(test_title, "parallel vector write test 5 -- %s / col op / col I/O", vfd_name);
+        }
+
+        TESTING(test_title);
+    }
+
+    /* only rank 0 reports progress (and only if enabled above) */
+    show_progress = ((show_progress) && (mpi_rank == 0));
+
+    if (show_progress)
+        HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+                            &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) Set the test file in a known state by writing zeros
+     *    to all bytes in the test file.  Since we have already
+     *    tested this, do this via a vector write of zero_fi_buf.
+     */
+    if (pass) {
+
+        count    = 1;
+        types[0] = H5FD_MEM_DRAW;
+        addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+        sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t);
+        bufs[0]  = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK]));
+
+        if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDwrite_vector() failed.\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 3) Barrier
+     */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) For each rank, define base_index equal to:
+     *
+     *        mpi_rank * INTS_PER_RANK
+     *
+     *    and define base_addr equal to
+     *
+     *        base_index * sizeof(int32_t).
+     *
+     *    Setup a vector write between base_addr and
+     *    base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+     *    as follows:
+     *
+     *    The vector contents vary by (mpi_rank % 4) so that ranks
+     *    present vectors of different lengths, non-increasing buffer
+     *    addresses, and (for rank % 4 == 3) the empty vector.
+     */
+    if (pass) {
+
+        base_index = mpi_rank * INTS_PER_RANK;
+        base_addr  = (haddr_t)((size_t)base_index * sizeof(int32_t));
+
+        if ((mpi_rank % 4) == 0) {
+
+            /* if ( rank % 4 == 0 ) construct a vector that writes:
+             *
+             *     negative_fi_buf starting at base_index +
+             *     INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4
+             *     entries,
+             *
+             *     decreasing_fi_buf starting at base_index +
+             *     INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8
+             *     entries, and
+             *
+             *     increasing_fi_buf starting at base_index +
+             *     INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16
+             *     entries
+             *
+             *  to the equivalent locations in the file.
+             *
+             *  Note that the entries are arranged in DECREASING
+             *  address order -- both in the file and in RAM.
+             */
+            count = 3;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+            sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+            bufs[0]  = (void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+
+            types[1] = H5FD_MEM_DRAW;
+            addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) * sizeof(int32_t));
+            sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t);
+            bufs[1]  = (void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 4)]));
+
+            types[2] = H5FD_MEM_DRAW;
+            addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t));
+            sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+            bufs[2]  = (void *)(&(increasing_fi_buf[base_index + (INTS_PER_RANK / 16)]));
+        }
+        else if ((mpi_rank % 4) == 1) {
+
+            /* if ( rank % 4 == 1 ) construct a vector that writes:
+             *
+             *     increasing_fi_buf starting at base_index + 1 and
+             *     running for (INTS_PER_RANK / 2) - 2 entries, and
+             *
+             *     decreasing_fi_buf starting at base_index +
+             *     INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2)
+             *     - 2 entries
+             *
+             *  to the equivalent locations in the file.
+             */
+            count = 2;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)(sizeof(int32_t));
+            sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+            bufs[0]  = (void *)(&(increasing_fi_buf[base_index + 1]));
+
+            types[1] = H5FD_MEM_DRAW;
+            addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t));
+            sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+            bufs[1]  = (void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 2) + 1]));
+        }
+        else if ((mpi_rank % 4) == 2) {
+
+            /* if ( rank % 4 == 2 ) construct a vector that writes:
+             *
+             *     negative_fi_buf starting at base_index +
+             *     INTS_PER_RANK / 2 and running for one entry.
+             *
+             *  to the equivalent location in the file.
+             */
+            count = 1;
+
+            types[0] = H5FD_MEM_DRAW;
+            addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+            sizes[0] = sizeof(int32_t);
+            bufs[0]  = (void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+        }
+        else if ((mpi_rank % 4) == 3) {
+
+            /* if ( rank % 4 == 3 ) construct and write the empty vector */
+
+            count = 0;
+        }
+
+        /* All ranks must make this call even when count == 0 -- in the
+         * collective case the write is a collective operation.  With
+         * count == 0, the types/addrs/sizes/bufs arrays are unused.
+         */
+        if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDwrite_vector() failed (1).\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) Barrier */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 6) On each rank, read the entire file into the read_fi_buf,
+     *    and compare against increasing_fi_buf,
+     *    decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as
+     *    appropriate.  Report failure if any differences are
+     *    detected.
+     *
+     *    The expected contents of each rank's section of the file
+     *    depend on (rank % 4), mirroring the vectors written in 4)
+     *    above; all bytes not covered by a write must still be zero.
+     */
+
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDread() failed.\n";
+        }
+
+        for (i = 0; ((pass) && (i < mpi_size)); i++) {
+
+            base_index = i * INTS_PER_RANK;
+
+            for (j = base_index; j < base_index + INTS_PER_RANK; j++) {
+
+                /* k is the offset of entry j within rank i's section */
+                k = j - base_index;
+
+                switch (i % 4) {
+
+                    case 0:
+                        if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) {
+
+                            if (read_fi_buf[j] != negative_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (1.1)";
+                            }
+                        }
+                        else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) {
+
+                            if (read_fi_buf[j] != decreasing_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (1.2)";
+                            }
+                        }
+                        else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) {
+
+                            if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (1.3)";
+                            }
+                        }
+                        else {
+
+                            if (read_fi_buf[j] != 0) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (1.4)";
+                            }
+                        }
+                        break;
+
+                    case 1:
+                        if ((1 <= k) && (k <= ((INTS_PER_RANK / 2) - 2))) {
+
+                            if (read_fi_buf[j] != increasing_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (2.1)";
+                            }
+                        }
+                        else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) {
+
+                            if (read_fi_buf[j] != decreasing_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (2.2)";
+                            }
+                        }
+                        else {
+
+                            if (read_fi_buf[j] != 0) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (2.3)";
+                            }
+                        }
+                        break;
+
+                    case 2:
+                        if (k == INTS_PER_RANK / 2) {
+
+                            if (read_fi_buf[j] != negative_fi_buf[j]) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (3.1)";
+                            }
+                        }
+                        else {
+
+                            if (read_fi_buf[j] != 0) {
+
+                                pass         = FALSE;
+                                failure_mssg = "unexpected data read from file (3.2)";
+                            }
+                        }
+                        break;
+
+                    case 3:
+                        /* rank wrote the empty vector -- whole section stays zero */
+                        if (read_fi_buf[j] != 0) {
+
+                            pass         = FALSE;
+                            failure_mssg = "unexpected data read from file (4)";
+                        }
+                        break;
+
+                    default:
+                        HDassert(FALSE); /* should be un-reachable */
+                        break;
+                }
+            }
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 7) Close the test file and delete it (on rank 0 only).
+     *    Close FAPL and DXPL.
+     */
+
+    if (pass) {
+
+        takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* report results */
+    if (mpi_rank == 0) {
+
+        if (pass) {
+
+            PASSED();
+        }
+        else {
+
+            H5_FAILED();
+
+            if (show_progress) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
+        }
+    }
+
+    return (!pass);
+
+} /* vector_write_test_5() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_6()
+ *
+ * Purpose: Test correct management of the sizes[] array optimization,
+ * where, if sizes[i] == 0, we use sizes[i - 1] as the value
+ * of size[j], for j >= i.
+ *
+ * 1) Open the test file with the specified VFD, set the eoa.
+ * and setup the DXPL.
+ *
+ * 2) Using rank zero, write the entire zero_fi_buf to
+ * the file.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector write from increasing_fi_buf between
+ * base_addr and base_addr + INTS_PER_RANK *
+ * sizeof(int32_t) - 1 that writes every 16th integer
+ * located in that range starting at base_addr.
+ * Use a sizes[] array of length 2, with sizes[0] set
+ * to sizeof(int32_t), and sizes[1] = 0.
+ *
+ * Write the integers into the corresponding locations in
+ * the file.
+ *
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf, and increasing_fi_buf
+ * as appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Barrier.
+ *
+ * 8) Close the test file.
+ *
+ * 9) On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode,
+                    H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name)
+{
+    const char *fcn_name = "vector_write_test_6()";
+    char        test_title[120];
+    char        filename[512];
+    haddr_t     eoa;
+    haddr_t     base_addr;
+    hbool_t     show_progress = FALSE;
+    hid_t       fapl_id       = -1;   /* file access property list ID */
+    hid_t       dxpl_id       = -1;   /* data access property list ID */
+    H5FD_t *    lf            = NULL; /* VFD struct ptr */
+    int         cp            = 0;
+    int         i;
+    int         base_index;
+    uint32_t    count = 0;
+    /* one entry per 16th integer in this rank's section (+1 guards against
+     * a zero length array -- presumably for small INTS_PER_RANK; confirm)
+     */
+    H5FD_mem_t types[(INTS_PER_RANK / 16) + 1];
+    haddr_t    addrs[(INTS_PER_RANK / 16) + 1];
+    /* deliberately length 2 -- exercises the sizes[] repetition optimization
+     * under test (sizes[i] == 0 ==> remaining entries reuse sizes[i - 1])
+     */
+    size_t sizes[2];
+    void * bufs[(INTS_PER_RANK / 16) + 1];
+
+    pass = TRUE;
+
+    if (mpi_rank == 0) {
+
+        if (xfer_mode == H5FD_MPIO_INDEPENDENT) {
+
+            sprintf(test_title, "parallel vector write test 6 -- %s / independent", vfd_name);
+        }
+        else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) {
+
+            sprintf(test_title, "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name);
+        }
+        else {
+
+            HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO);
+
+            sprintf(test_title, "parallel vector write test 6 -- %s / col op / col I/O", vfd_name);
+        }
+
+        TESTING(test_title);
+    }
+
+    /* only rank 0 reports progress (and only if enabled above) */
+    show_progress = ((show_progress) && (mpi_rank == 0));
+
+    if (show_progress)
+        HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */
+    if (pass) {
+
+        eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+        setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+                            &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 2) Using rank zero, write the entire zero_fi_buf to
+     *    the file.  (The code writes zero_fi_buf, not negative_fi_buf
+     *    as an earlier version of this comment claimed -- the read-back
+     *    check in 6) expects zeros outside the vector-written entries.)
+     */
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (mpi_rank == 0) {
+
+            if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)zero_fi_buf) < 0) {
+
+                pass         = FALSE;
+                failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+            }
+        }
+    }
+
+    /* 3) Barrier */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 4) For each rank, define base_index equal to:
+     *
+     *        mpi_rank * INTS_PER_RANK
+     *
+     *    and define base_addr equal to
+     *
+     *        base_index * sizeof(int32_t).
+     *
+     *    Setup a vector write from increasing_fi_buf between
+     *    base_addr and base_addr + INTS_PER_RANK *
+     *    sizeof(int32_t) - 1 that writes every 16th integer
+     *    located in that range starting at base_addr.
+     *    Use a sizes[] array of length 2, with sizes[0] set
+     *    to sizeof(int32_t), and sizes[1] = 0.
+     *
+     *    Write the integers into the corresponding locations in
+     *    the file.
+     */
+    if (pass) {
+
+        base_index = (mpi_rank * INTS_PER_RANK);
+        base_addr  = (haddr_t)base_index * (haddr_t)sizeof(int32_t);
+
+        count    = INTS_PER_RANK / 16;
+        sizes[0] = sizeof(int32_t);
+        sizes[1] = 0; /* 0 ==> all entries from index 1 on reuse sizes[0] */
+
+        for (i = 0; i < INTS_PER_RANK / 16; i++) {
+
+            types[i] = H5FD_MEM_DRAW;
+            addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t));
+            bufs[i]  = (void *)(&(increasing_fi_buf[base_index + (i * 16)]));
+        }
+
+        if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDwrite_vector() failed (1).\n";
+        }
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 5) Barrier */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 6) On each rank, read the entire file into the read_fi_buf,
+     *    and compare against zero_fi_buf, and increasing_fi_buf
+     *    as appropriate.  Report failure if any differences are
+     *    detected.  Every 16th integer must match increasing_fi_buf;
+     *    all others must still hold the zeros written in 2).
+     */
+    if (pass) {
+
+        size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+        if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) {
+
+            pass         = FALSE;
+            failure_mssg = "H5FDread() failed.\n";
+        }
+
+        for (i = 0; ((pass) && (i < mpi_size * INTS_PER_RANK)); i++) {
+
+            if (i % 16 == 0) {
+
+                if (read_fi_buf[i] != increasing_fi_buf[i]) {
+
+                    pass         = FALSE;
+                    failure_mssg = "unexpected data read from file (1)";
+                }
+            }
+            else if (read_fi_buf[i] != zero_fi_buf[i]) {
+
+                pass         = FALSE;
+                failure_mssg = "unexpected data read from file (2)";
+            }
+        }
+    } /* end if */
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 7) Barrier */
+
+    if (pass) {
+
+        MPI_Barrier(MPI_COMM_WORLD);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* 8) Close the test file and delete it (on rank 0 only).
+     *    Close FAPL and DXPL.
+     */
+
+    if (pass) {
+
+        takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+    }
+
+    if (show_progress)
+        HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* report results */
+    if (mpi_rank == 0) {
+
+        if (pass) {
+
+            PASSED();
+        }
+        else {
+
+            H5_FAILED();
+
+            if (show_progress) {
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+            }
+        }
+    }
+
+    return (!pass);
+
+} /* vector_write_test_6() */
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run parallel VFD tests.
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: John Mainzer
+ *              3/26/21
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+    unsigned nerrs = 0;
+    int      mpi_size;
+    int      mpi_rank;
+
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+    /* Attempt to turn off atexit post processing so that in case errors
+     * occur during the test and the process is aborted, it will not hang
+     * in the atexit post processing.  If it does, it may try to make MPI
+     * calls which may not work.
+     */
+    if (H5dont_atexit() < 0)
+        HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
+
+    H5open();
+
+    if (mpi_rank == 0) {
+        HDprintf("=========================================\n");
+        HDprintf("Parallel virtual file driver (VFD) tests\n");
+        HDprintf("        mpi_size = %d\n", mpi_size);
+        HDprintf("=========================================\n");
+    }
+
+    if (mpi_size < 2) {
+        if (mpi_rank == 0)
+            HDprintf("    Need at least 2 processes.  Exiting.\n");
+        goto finish;
+    }
+
+    /* Allocate and initialize the shared file image buffers used by all
+     * of the tests below.  Sets the global "pass" on failure.
+     */
+    alloc_and_init_file_images(mpi_size);
+
+    if (!pass) {
+
+        /* NOTE(review): we report the failure but fall through to the tests
+         * anyway, matching the original behavior -- an early exit here could
+         * deadlock if the allocation failed on only some ranks, since the
+         * tests contain collective operations.
+         */
+        HDprintf("\nAllocation and initialize of file image buffers failed.  Test aborted.\n");
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    nerrs +=
+        vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+    nerrs +=
+        vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio");
+    nerrs +=
+        vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio");
+
+finish:
+
+    /* make sure all processes are finished before final report, cleanup
+     * and exit.
+     */
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    if (mpi_rank == 0) { /* only process 0 reports */
+        HDprintf("===================================\n");
+        if (nerrs > 0)
+            /* nerrs is unsigned -- use %u, not %d */
+            HDprintf("***parallel vfd tests detected %u failures***\n", nerrs);
+        else
+            HDprintf("parallel vfd tests finished with no failures\n");
+        HDprintf("===================================\n");
+    }
+
+    /* discard the file image buffers */
+    free_file_images();
+
+    /* close HDF5 library */
+    H5close();
+
+    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+    MPI_Finalize();
+
+    /* cannot just return (nerrs) because exit code is limited to 1byte */
+    return (nerrs > 0);
+
+} /* main() */
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index c692287..16f45d3 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -186,10 +186,6 @@ enum H5TEST_COLL_CHUNK_API {
#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
-#define TEST_FILTERS 0x080
-/* TEST_FILTERS will take place of this after supporting mpio + filter for
- * H5Dcreate and H5Dwrite */
-#define TEST_FILTERS_READ 0x100
/* Don't erase these lines, they are put here for debugging purposes */
/*
diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c
index 97bae24..2e7c3be 100644
--- a/tools/lib/h5diff.c
+++ b/tools/lib/h5diff.c
@@ -1371,7 +1371,7 @@ hyperslab_pdiff(hid_t file1_id, const char *path1, hid_t file2_id, const char *p
* This should be the princpal use case for this function.
* We may may also deal with links.
* NOTE: This particular function is ONLY called when we have identified
- * the specfic input object as a dataset which contain a moderately
+ * the specific input object as a dataset which contain a moderately
* large number of elements (DEFAULT_LARGE_DSET_SIZE = 1M). The concern
* is two fold. 1) By utilizing parallelism, we can improve performance;
* and 2) The memory footprint utilized for processing the dataset
@@ -3382,7 +3382,7 @@ diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1, hid_t file2_id,
H5TOOLS_DEBUG("Beginning of big else block");
/* We're in parallel mode */
/* Since the data type of diff value is hsize_t which can
- * be arbitary large such that there is no MPI type that
+ * be arbitrarily large such that there is no MPI type that
* matches it, the value is passed between processes as
* an array of bytes in order to be portable. But this
* may not work in non-homogeneous MPI environments.
diff --git a/tools/lib/h5diff_array.c b/tools/lib/h5diff_array.c
index 1b0d36f..a7156b5 100644
--- a/tools/lib/h5diff_array.c
+++ b/tools/lib/h5diff_array.c
@@ -204,7 +204,8 @@ diff_array(void *_mem1, void *_mem2, diff_opt_t *opts, hid_t container1_id, hid_
mcomp_t members;
H5T_class_t type_class;
- H5TOOLS_START_DEBUG(" - rank:%d hs_nelmts:%lld errstat:%d", opts->rank, opts->hs_nelmts, opts->err_stat);
+ H5TOOLS_START_DEBUG(" - rank:%d hs_nelmts:%" PRIuHSIZE " errstat:%d", opts->rank, opts->hs_nelmts,
+ opts->err_stat);
opts->print_header = 1; /* enable print header */
/* get the size. */
@@ -388,7 +389,8 @@ diff_array(void *_mem1, void *_mem2, diff_opt_t *opts, hid_t container1_id, hid_
HDmemset(&members, 0, sizeof(mcomp_t));
get_member_types(opts->m_tid, &members);
for (i = 0; i < opts->hs_nelmts; i++) {
- H5TOOLS_DEBUG("opts->pos[%lld]:%lld - nelmts:%lld", i, opts->pos[i], opts->hs_nelmts);
+ H5TOOLS_DEBUG("opts->pos[%" PRIuHSIZE "]:%" PRIuHSIZE " - nelmts:%" PRIuHSIZE, i,
+ opts->pos[i], opts->hs_nelmts);
nfound += diff_datum(mem1 + i * size, mem2 + i * size, i, opts, container1_id, container2_id,
&members);
if (opts->count_bool && nfound >= opts->count)
@@ -396,7 +398,7 @@ diff_array(void *_mem1, void *_mem2, diff_opt_t *opts, hid_t container1_id, hid_
} /* i */
close_member_types(&members);
} /* switch */
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -452,7 +454,8 @@ diff_datum(void *_mem1, void *_mem2, hsize_t elemtno, diff_opt_t *opts, hid_t co
hsize_t nfound = 0; /* differences found */
diff_err_t ret_value = opts->err_stat;
- H5TOOLS_START_DEBUG("ph:%d elemtno:%lld - errstat:%d", opts->print_header, elemtno, opts->err_stat);
+ H5TOOLS_START_DEBUG("ph:%d elemtno:%" PRIuHSIZE " - errstat:%d", opts->print_header, elemtno,
+ opts->err_stat);
type_size = H5Tget_size(opts->m_tid);
type_class = H5Tget_class(opts->m_tid);
@@ -702,8 +705,8 @@ diff_datum(void *_mem1, void *_mem2, hsize_t elemtno, diff_opt_t *opts, hid_t co
H5TOOLS_DEBUG("H5T_ARRAY ph=%d", opts->print_header);
arr_opts = *opts;
- H5TOOLS_DEBUG("Check opts: hs_nelmts:%lld to %lld rank:%d to %d", opts->hs_nelmts,
- arr_opts.hs_nelmts, opts->rank, arr_opts.rank);
+ H5TOOLS_DEBUG("Check opts: hs_nelmts:%" PRIuHSIZE " to %" PRIuHSIZE " rank:%d to %d",
+ opts->hs_nelmts, arr_opts.hs_nelmts, opts->rank, arr_opts.rank);
/* get the array's base datatype for each element */
arr_opts.m_tid = H5Tget_super(opts->m_tid);
size = H5Tget_size(arr_opts.m_tid);
@@ -1051,8 +1054,8 @@ diff_datum(void *_mem1, void *_mem2, hsize_t elemtno, diff_opt_t *opts, hid_t co
H5TOOLS_INFO("H5Rdestroy H5R_OBJECT1 failed");
if (H5Rdestroy(ref1_buf) < 0)
H5TOOLS_INFO("H5Rdestroy H5R_OBJECT1 failed");
- H5TOOLS_DEBUG("H5T_REFERENCE - H5T_STD_REF complete nfound:%lld - errstat:%d", nfound,
- ref_opts.err_stat);
+ H5TOOLS_DEBUG("H5T_REFERENCE - H5T_STD_REF complete nfound:%" PRIuHSIZE " - errstat:%d",
+ nfound, ref_opts.err_stat);
}
/*-------------------------------------------------------------------------
* H5T_STD_REF_DSETREG
@@ -1263,7 +1266,7 @@ diff_datum(void *_mem1, void *_mem2, hsize_t elemtno, diff_opt_t *opts, hid_t co
done:
opts->err_stat = opts->err_stat | ret_value;
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -1374,8 +1377,8 @@ diff_region(hid_t obj1_id, hid_t obj2_id, hid_t region1_id, hid_t region2_id, di
npoints2 = H5Sget_select_elem_npoints(region2_id);
}
H5E_END_TRY;
- H5TOOLS_DEBUG("blocks: 1=%lld-2=%lld", nblocks1, nblocks2);
- H5TOOLS_DEBUG("points: 1=%lld-2=%lld", npoints1, npoints2);
+ H5TOOLS_DEBUG("blocks: 1=%" PRIdHSIZE "-2=%" PRIdHSIZE, nblocks1, nblocks2);
+ H5TOOLS_DEBUG("points: 1=%" PRIdHSIZE "-2=%" PRIdHSIZE, npoints1, npoints2);
if (nblocks1 != nblocks2 || npoints1 != npoints2 || ndims1 != ndims2) {
opts->not_cmp = 1;
@@ -1518,10 +1521,10 @@ diff_region(hid_t obj1_id, hid_t obj2_id, hid_t region1_id, hid_t region2_id, di
#if defined(H5DIFF_DEBUG)
for (i = 0; i < npoints1; i++) {
- parallel_print("%sPt%lu: ", i ? "," : "", (unsigned long)i);
+ parallel_print("%sPt%d: ", i ? "," : "", i);
for (j = 0; j < ndims1; j++)
- parallel_print("%s%lu", j ? "," : "(", (unsigned long)(ptdata1[i * ndims1 + j]));
+ parallel_print("%s%" PRIuHSIZE, j ? "," : "(", ptdata1[i * ndims1 + j]);
parallel_print(")");
}
@@ -1538,7 +1541,7 @@ diff_region(hid_t obj1_id, hid_t obj2_id, hid_t region1_id, hid_t region2_id, di
ret_value = nfound_p + nfound_b;
done:
- H5TOOLS_ENDDEBUG(" with diffs:%lld", ret_value);
+ H5TOOLS_ENDDEBUG(" with diffs:%" PRIuHSIZE, ret_value);
return ret_value;
}
@@ -1575,7 +1578,7 @@ character_compare(char *mem1, char *mem2, hsize_t elemtno, size_t u, diff_opt_t
}
nfound++;
}
- H5TOOLS_ENDDEBUG(": %lld", nfound);
+ H5TOOLS_ENDDEBUG(": %" PRIuHSIZE, nfound);
return nfound;
}
@@ -1649,7 +1652,7 @@ character_compare_opt(unsigned char *mem1, unsigned char *mem2, hsize_t elemtno,
nfound++;
}
- H5TOOLS_ENDDEBUG(": %lld zero:%d", nfound, both_zero);
+ H5TOOLS_ENDDEBUG(": %" PRIuHSIZE " zero:%d", nfound, both_zero);
return nfound;
}
@@ -1830,7 +1833,7 @@ diff_float_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
}
}
- H5TOOLS_ENDDEBUG(": %lld zero:%d", nfound, both_zero);
+ H5TOOLS_ENDDEBUG(": %" PRIuHSIZE " zero:%d", nfound, both_zero);
return nfound;
}
@@ -2001,7 +2004,7 @@ diff_double_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
nfound++;
}
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2063,7 +2066,7 @@ diff_ldouble_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
opts->print_percentage = 0;
print_pos(opts, elem_idx, 0);
if (print_data(opts)) {
- parallel_print(F_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
+ parallel_print(LD_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
}
nfound++;
}
@@ -2110,7 +2113,7 @@ diff_ldouble_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
opts->print_percentage = 0;
print_pos(opts, elem_idx, 0);
if (print_data(opts)) {
- parallel_print(F_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
+ parallel_print(LD_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
}
nfound++;
}
@@ -2157,7 +2160,7 @@ diff_ldouble_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
opts->print_percentage = 0;
print_pos(opts, elem_idx, 0);
if (print_data(opts)) {
- parallel_print(F_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
+ parallel_print(LD_FORMAT, temp1_double, temp2_double, ABS(temp1_double - temp2_double));
}
nfound++;
}
@@ -2175,7 +2178,7 @@ diff_ldouble_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2263,7 +2266,7 @@ diff_schar_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2351,7 +2354,7 @@ diff_uchar_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2439,7 +2442,7 @@ diff_short_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2531,7 +2534,7 @@ diff_ushort_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2619,7 +2622,7 @@ diff_int_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, dif
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2707,7 +2710,7 @@ diff_uint_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, di
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2795,7 +2798,7 @@ diff_long_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, di
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2885,7 +2888,7 @@ diff_ulong_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
nfound++;
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -2978,7 +2981,7 @@ diff_llong_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx, d
}
}
- H5TOOLS_ENDDEBUG(":%lld - errstat:%d", nfound, opts->err_stat);
+ H5TOOLS_ENDDEBUG(":%" PRIuHSIZE " - errstat:%d", nfound, opts->err_stat);
return nfound;
}
@@ -3078,7 +3081,7 @@ diff_ullong_element(unsigned char *mem1, unsigned char *mem2, hsize_t elem_idx,
}
}
- H5TOOLS_ENDDEBUG(": %lld zero:%d", nfound, both_zero);
+ H5TOOLS_ENDDEBUG(": %" PRIuHSIZE " zero:%d", nfound, both_zero);
return nfound;
}
@@ -3323,7 +3326,7 @@ print_header(diff_opt_t *opts)
static void
print_pos(diff_opt_t *opts, hsize_t idx, size_t u)
{
- H5TOOLS_START_DEBUG(" -- idx:%lld", idx);
+ H5TOOLS_START_DEBUG(" -- idx:%" PRIuHSIZE, idx);
if (print_data(opts)) {
hsize_t curr_pos = idx;
@@ -3336,8 +3339,8 @@ print_pos(diff_opt_t *opts, hsize_t idx, size_t u)
H5TOOLS_DEBUG("rank=%d", opts->rank);
if (opts->rank > 0) {
parallel_print("[ ");
- H5TOOLS_DEBUG("do calc_acc_pos[%lld] nelmts:%lld - errstat:%d", idx, opts->hs_nelmts,
- opts->err_stat);
+ H5TOOLS_DEBUG("do calc_acc_pos[%" PRIuHSIZE "] nelmts:%" PRIuHSIZE " - errstat:%d", idx,
+ opts->hs_nelmts, opts->err_stat);
if (opts->sset[0] != NULL) {
/* Subsetting is used - calculate total position */
hsize_t curr_idx = 0; /* current pos in the selection space for each dimension */
@@ -3364,33 +3367,36 @@ print_pos(diff_opt_t *opts, hsize_t idx, size_t u)
j = opts->rank - i - 1;
prev_total_dim_size *= prev_dim_size;
dim_size = opts->dims[j];
- H5TOOLS_DEBUG("j=%d, dim_size=%lld, prev_dim_size=%lld, total_dim_size=%lld, "
- "prev_total_dim_size=%lld",
+ H5TOOLS_DEBUG("j=%d, dim_size=%" PRIuHSIZE ", prev_dim_size=%" PRIuHSIZE
+ ", total_dim_size=%" PRIuHSIZE ", "
+ "prev_total_dim_size=%" PRIuHSIZE,
j, dim_size, prev_dim_size, total_dim_size, prev_total_dim_size);
count = opts->sset[0]->count.data[j];
block = opts->sset[0]->block.data[j];
stride = opts->sset[0]->stride.data[j];
- H5TOOLS_DEBUG("stride=%lld, count=%lld, block=%lld", stride, count, block);
+ H5TOOLS_DEBUG("stride=%" PRIuHSIZE ", count=%" PRIuHSIZE ", block=%" PRIuHSIZE,
+ stride, count, block);
tmp = count * block;
k0 = curr_idx / tmp;
k1 = curr_idx % tmp;
curr_pos += k1 * stride * prev_total_dim_size;
- H5TOOLS_DEBUG("curr_idx=%lld, k0=%lld, k1=%lld, curr_pos=%lld", curr_idx, k0, k1,
- curr_pos);
+ H5TOOLS_DEBUG("curr_idx=%" PRIuHSIZE ", k0=%" PRIuHSIZE ", k1=%" PRIuHSIZE
+ ", curr_pos=%" PRIuHSIZE,
+ curr_idx, k0, k1, curr_pos);
if (k0 > 0)
curr_idx = k0 * total_dim_size;
- H5TOOLS_DEBUG("curr_idx=%lld, tmp=%lld", curr_idx, tmp);
+ H5TOOLS_DEBUG("curr_idx=%" PRIuHSIZE ", tmp=%" PRIuHSIZE, curr_idx, tmp);
total_dim_size *= dim_size;
/* if last calculation exists within in current dimension */
if (k0 == 0)
break;
- H5TOOLS_DEBUG("j=%d, curr_pos=%lld", j, curr_pos);
+ H5TOOLS_DEBUG("j=%d, curr_pos=%" PRIuHSIZE, j, curr_pos);
prev_dim_size = dim_size;
}
/* check if there is a final calculation needed for slowest changing dimension */
if (k0 > 0)
curr_pos += k0 * stride * prev_total_dim_size;
- H5TOOLS_DEBUG("4:curr_idx=%lld, curr_pos=%lld", curr_idx, curr_pos);
+ H5TOOLS_DEBUG("4:curr_idx=%" PRIuHSIZE ", curr_pos=%" PRIuHSIZE, curr_idx, curr_pos);
}
}
/*
@@ -3400,11 +3406,11 @@ print_pos(diff_opt_t *opts, hsize_t idx, size_t u)
calc_acc_pos((unsigned)opts->rank, curr_pos, opts->acc, opts->pos);
for (int i = 0; i < opts->rank; i++) {
- H5TOOLS_DEBUG("pos loop:%d with opts->pos=%lld opts->sm_pos=%lld", i, opts->pos[i],
- opts->sm_pos[i]);
+ H5TOOLS_DEBUG("pos loop:%d with opts->pos=%" PRIuHSIZE " opts->sm_pos=%" PRIuHSIZE, i,
+ opts->pos[i], opts->sm_pos[i]);
opts->pos[i] += (unsigned long)opts->sm_pos[i];
- H5TOOLS_DEBUG("pos loop:%d with opts->pos=%lld", i, opts->pos[i]);
- parallel_print(HSIZE_T_FORMAT, (unsigned long long)opts->pos[i]);
+ H5TOOLS_DEBUG("pos loop:%d with opts->pos=%" PRIuHSIZE, i, opts->pos[i]);
+ parallel_print("%" PRIuHSIZE, opts->pos[i]);
parallel_print(" ");
}
parallel_print("]");
diff --git a/tools/lib/h5diff_attr.c b/tools/lib/h5diff_attr.c
index 5c1777a..63b6d8f 100644
--- a/tools/lib/h5diff_attr.c
+++ b/tools/lib/h5diff_attr.c
@@ -292,7 +292,7 @@ build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t **table_out,
}
if (opts->mode_verbose_level >= 1)
- parallel_print("Attributes status: %d common, %d only in obj1, %d only in obj2\n",
+ parallel_print("Attributes status: %zu common, %zu only in obj1, %zu only in obj2\n",
table_lp->nattrs - table_lp->nattrs_only1 - table_lp->nattrs_only2,
table_lp->nattrs_only1, table_lp->nattrs_only2);
diff --git a/tools/lib/h5diff_util.c b/tools/lib/h5diff_util.c
index 34fe4e8..8ef57f4 100644
--- a/tools/lib/h5diff_util.c
+++ b/tools/lib/h5diff_util.c
@@ -40,11 +40,11 @@ print_dimensions(int rank, hsize_t *dims)
else {
parallel_print("[");
for (i = 0; i < rank - 1; i++) {
- parallel_print(HSIZE_T_FORMAT, dims[i]);
+ parallel_print("%" PRIuHSIZE, dims[i]);
parallel_print("x");
}
- parallel_print(HSIZE_T_FORMAT, dims[rank - 1]);
+ parallel_print("%" PRIuHSIZE, dims[rank - 1]);
parallel_print("]");
}
}
diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c
index db8df16..c7dba25 100644
--- a/tools/lib/h5tools.c
+++ b/tools/lib/h5tools.c
@@ -945,7 +945,7 @@ h5tools_fopen(const char *fname, unsigned flags, hid_t fapl_id, hbool_t use_spec
* as TRUE, we should return failure now since the file couldn't be opened with
* the VFL driver/VOL connector that was set on the FAPL by the caller.
*/
- if (fid < 0 && use_specific_driver)
+ if (use_specific_driver)
H5TOOLS_GOTO_ERROR(H5I_INVALID_HID, "failed to open file using specified FAPL");
/*
@@ -1052,7 +1052,7 @@ done:
H5_ATTR_PURE static size_t
h5tools_count_ncols(const char *s)
{
- register size_t i;
+ size_t i;
for (i = 0; *s; s++)
if (*s >= ' ')
diff --git a/tools/lib/h5tools.h b/tools/lib/h5tools.h
index cc6183a..d3501b3 100644
--- a/tools/lib/h5tools.h
+++ b/tools/lib/h5tools.h
@@ -29,9 +29,6 @@
#define START_OF_DATA 0x0001
#define END_OF_DATA 0x0002
-/* format for hsize_t */
-#define HSIZE_T_FORMAT "%" H5_PRINTF_LL_WIDTH "u"
-
#define H5TOOLS_DUMP_MAX_RANK H5S_MAX_RANK
/* Stream macros */
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index 0fc5d2d..78dd3ff 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -745,11 +745,11 @@ h5tools_dump_region_data_blocks(hid_t region_space, hid_t region_id, FILE *strea
/* Start coordinates and opposite corner */
for (loop_indx = 0; loop_indx < ndims; loop_indx++)
- h5tools_str_append(buffer, "%s" HSIZE_T_FORMAT, loop_indx ? "," : "(",
+ h5tools_str_append(buffer, "%s%" PRIuHSIZE, loop_indx ? "," : "(",
ptdata[indx * 2 * ndims + loop_indx]);
for (loop_indx = 0; loop_indx < ndims; loop_indx++)
- h5tools_str_append(buffer, "%s" HSIZE_T_FORMAT, loop_indx ? "," : ")-(",
+ h5tools_str_append(buffer, "%s%" PRIuHSIZE, loop_indx ? "," : ")-(",
ptdata[indx * 2 * ndims + loop_indx + ndims]);
h5tools_str_append(buffer, ")");
@@ -1101,7 +1101,7 @@ h5tools_dump_region_data_points(hid_t region_space, hid_t region_id, FILE *strea
(unsigned long)indx);
for (loop_indx = 0; loop_indx < ndims; loop_indx++)
- h5tools_str_append(buffer, "%s" HSIZE_T_FORMAT, loop_indx ? "," : "(",
+ h5tools_str_append(buffer, "%s%" PRIuHSIZE, loop_indx ? "," : "(",
ptdata[indx * ndims + loop_indx]);
h5tools_str_append(buffer, ")");
@@ -2486,7 +2486,7 @@ found_string_type:
ctx->need_prefix = TRUE;
h5tools_str_reset(buffer);
- h5tools_str_append(buffer, "OPAQUE_SIZE \"%s\";", size);
+ h5tools_str_append(buffer, "OPAQUE_SIZE \"%zu\";", size);
h5tools_render_element(stream, info, ctx, buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
}
@@ -2608,7 +2608,7 @@ found_string_type:
if (H5Tget_array_dims2(type, dims) >= 0) {
/* Print array dimensions */
for (i = 0; i < ndims; i++)
- h5tools_str_append(buffer, "[" HSIZE_T_FORMAT "]", dims[i]);
+ h5tools_str_append(buffer, "[%" PRIuHSIZE "]", dims[i]);
h5tools_str_append(buffer, " ");
}
@@ -2683,12 +2683,12 @@ h5tools_print_dataspace(h5tools_str_t *buffer, hid_t space)
case H5S_SIMPLE:
/* simple dataspace */
- h5tools_str_append(buffer, "%s %s { %s " HSIZE_T_FORMAT,
+ h5tools_str_append(buffer, "%s %s { %s %" PRIuHSIZE,
h5tools_dump_header_format->dataspacedescriptionbegin, S_SIMPLE,
h5tools_dump_header_format->dataspacedimbegin, size[0]);
for (i = 1; i < ndims; i++)
- h5tools_str_append(buffer, ", " HSIZE_T_FORMAT, size[i]);
+ h5tools_str_append(buffer, ", %" PRIuHSIZE, size[i]);
h5tools_str_append(buffer, " %s / ", h5tools_dump_header_format->dataspacedimend);
@@ -2696,14 +2696,14 @@ h5tools_print_dataspace(h5tools_str_t *buffer, hid_t space)
h5tools_str_append(buffer, "%s %s", h5tools_dump_header_format->dataspacedimbegin,
"H5S_UNLIMITED");
else
- h5tools_str_append(buffer, "%s " HSIZE_T_FORMAT,
- h5tools_dump_header_format->dataspacedimbegin, maxsize[0]);
+ h5tools_str_append(buffer, "%s %" PRIuHSIZE, h5tools_dump_header_format->dataspacedimbegin,
+ maxsize[0]);
for (i = 1; i < ndims; i++)
if (maxsize[i] == H5S_UNLIMITED)
h5tools_str_append(buffer, ", %s", "H5S_UNLIMITED");
else
- h5tools_str_append(buffer, ", " HSIZE_T_FORMAT, maxsize[i]);
+ h5tools_str_append(buffer, ", %" PRIuHSIZE, maxsize[i]);
h5tools_str_append(buffer, " %s }", h5tools_dump_header_format->dataspacedimend);
break;
@@ -2742,7 +2742,6 @@ h5tools_print_enum(FILE *stream, h5tools_str_t *buffer, const h5tool_format_t *i
{
char ** name = NULL; /*member names */
unsigned char *value = NULL; /*value array */
- unsigned char *copy = NULL; /*a pointer to value array */
unsigned i;
unsigned nmembs = 0; /*number of members */
int snmembs;
@@ -2834,16 +2833,16 @@ h5tools_print_enum(FILE *stream, h5tools_str_t *buffer, const h5tool_format_t *i
h5tools_str_append(buffer, "%02x", value[i * dst_size + j]);
}
else if (H5T_SGN_NONE == H5Tget_sign(native)) {
- /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size"
- *strangely, unless use another pointer "copy".*/
- copy = value + i * dst_size;
- h5tools_str_append(buffer, HSIZE_T_FORMAT, *((unsigned long long *)((void *)copy)));
+ unsigned long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(buffer, "%llu", copy);
}
else {
- /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size"
- *strangely, unless use another pointer "copy".*/
- copy = value + i * dst_size;
- h5tools_str_append(buffer, "%" H5_PRINTF_LL_WIDTH "d", *((long long *)((void *)copy)));
+ long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(buffer, "%lld", copy);
}
h5tools_str_append(buffer, ";");
@@ -2996,7 +2995,7 @@ h5tools_dump_oid(FILE *stream, const h5tool_format_t *info, h5tools_context_t *c
ctx->need_prefix = TRUE;
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer, "%s %s %d %s", OBJID, BEGIN, oid, END);
+ h5tools_str_append(&buffer, "%s %s %" PRId64 " %s", OBJID, BEGIN, oid, END);
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0, (hsize_t)0);
h5tools_str_close(&buffer);
@@ -3174,10 +3173,10 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
h5tools_str_append(&buffer, "%s ", CHUNKED);
rank = H5Pget_chunk(dcpl_id, (int)NELMTS(chsize), chsize);
- h5tools_str_append(&buffer, "%s " HSIZE_T_FORMAT, h5tools_dump_header_format->dataspacedimbegin,
+ h5tools_str_append(&buffer, "%s %" PRIuHSIZE, h5tools_dump_header_format->dataspacedimbegin,
chsize[0]);
for (i = 1; i < rank; i++)
- h5tools_str_append(&buffer, ", " HSIZE_T_FORMAT, chsize[i]);
+ h5tools_str_append(&buffer, ", %" PRIuHSIZE, chsize[i]);
h5tools_str_append(&buffer, " %s", h5tools_dump_header_format->dataspacedimend);
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
@@ -3219,17 +3218,17 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
if (storage_size != 0)
ratio = (double)uncomp_size / (double)storage_size;
- h5tools_str_append(&buffer, "SIZE " HSIZE_T_FORMAT " (%.3f:1 COMPRESSION)", storage_size,
+ h5tools_str_append(&buffer, "SIZE %" PRIuHSIZE " (%.3f:1 COMPRESSION)", storage_size,
ratio);
}
else
- h5tools_str_append(&buffer, "SIZE " HSIZE_T_FORMAT, storage_size);
+ h5tools_str_append(&buffer, "SIZE %" PRIuHSIZE, storage_size);
H5Sclose(sid);
H5Tclose(tid);
}
else {
- h5tools_str_append(&buffer, "SIZE " HSIZE_T_FORMAT, storage_size);
+ h5tools_str_append(&buffer, "SIZE %" PRIuHSIZE, storage_size);
}
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
@@ -3247,7 +3246,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
ctx->need_prefix = TRUE;
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer, "SIZE " HSIZE_T_FORMAT, storage_size);
+ h5tools_str_append(&buffer, "SIZE %" PRIuHSIZE, storage_size);
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
ctx->indent_level--;
@@ -3283,7 +3282,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
ctx->need_prefix = TRUE;
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer, "FILENAME %s SIZE " HSIZE_T_FORMAT, name, size);
+ h5tools_str_append(&buffer, "FILENAME %s SIZE %" PRIuHSIZE, name, size);
h5tools_str_append(&buffer, " OFFSET %ld", offset);
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
@@ -3313,7 +3312,7 @@ h5tools_dump_dcpl(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
ctx->need_prefix = TRUE;
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer, "SIZE " HSIZE_T_FORMAT, storage_size);
+ h5tools_str_append(&buffer, "SIZE %" PRIuHSIZE, storage_size);
h5tools_render_element(stream, info, ctx, &buffer, &curr_pos, (size_t)ncols, (hsize_t)0,
(hsize_t)0);
@@ -3909,7 +3908,7 @@ h5tools_print_dims(h5tools_str_t *buffer, hsize_t *s, int dims)
int i;
for (i = 0; i < dims; i++) {
- h5tools_str_append(buffer, HSIZE_T_FORMAT, s[i]);
+ h5tools_str_append(buffer, "%" PRIuHSIZE, s[i]);
if (i + 1 != dims)
h5tools_str_append(buffer, ", ");
@@ -4330,7 +4329,7 @@ h5tools_dump_data(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
if (string_dataformat.pindex) {
string_dataformat.idx_fmt = "(%s): ";
- string_dataformat.idx_n_fmt = HSIZE_T_FORMAT;
+ string_dataformat.idx_n_fmt = "%" PRIuHSIZE;
string_dataformat.idx_sep = ",";
string_dataformat.line_pre = "%s";
}
diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c
index 4e8dcc1..5976044 100644
--- a/tools/lib/h5tools_str.c
+++ b/tools/lib/h5tools_str.c
@@ -300,11 +300,11 @@ h5tools_str_prefix(h5tools_str_t *str /*in,out*/, const h5tool_format_t *info, h
if (i)
h5tools_str_append(str, "%s", OPT(info->idx_sep, ","));
- h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t)ctx->pos[i]);
+ h5tools_str_append(str, OPT(info->idx_n_fmt, "%" PRIuHSIZE), (hsize_t)ctx->pos[i]);
}
}
else /* Scalar */
- h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t)elmtno);
+ h5tools_str_append(str, OPT(info->idx_n_fmt, "%" PRIuHSIZE), (hsize_t)elmtno);
H5TOOLS_DEBUG("str=%s", str->s);
H5TOOLS_ENDDEBUG(" ");
@@ -341,11 +341,11 @@ h5tools_str_region_prefix(h5tools_str_t *str /*in,out*/, const h5tool_format_t *
if (i)
h5tools_str_append(str, "%s", OPT(info->idx_sep, ","));
- h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t)ctx->pos[i]);
+ h5tools_str_append(str, OPT(info->idx_n_fmt, "%" PRIuHSIZE), (hsize_t)ctx->pos[i]);
}
}
else /* Scalar */
- h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t)0);
+ h5tools_str_append(str, OPT(info->idx_n_fmt, "%" PRIuHSIZE), (hsize_t)0);
H5TOOLS_DEBUG("str=%s", str->s);
H5TOOLS_ENDDEBUG(" ");
@@ -385,7 +385,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace, const h5tool_form
/* Start coordinates */
h5tools_str_append(str, "%s%s ", info->line_indent, START);
for (j = 0; j < ndims; j++)
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", start[j]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, j ? "," : "(", start[j]);
h5tools_str_append(str, ")");
h5tools_str_append(str, "%s", "\n");
h5tools_str_indent(str, info, ctx);
@@ -393,7 +393,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace, const h5tool_form
/* Stride coordinates */
h5tools_str_append(str, "%s ", STRIDE);
for (j = 0; j < ndims; j++)
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", stride[j]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, j ? "," : "(", stride[j]);
h5tools_str_append(str, ")");
h5tools_str_append(str, "%s", "\n");
h5tools_str_indent(str, info, ctx);
@@ -404,7 +404,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace, const h5tool_form
if (count[j] == H5S_UNLIMITED)
h5tools_str_append(str, "%s%s", j ? "," : "(", "H5S_UNLIMITED");
else
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", count[j]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, j ? "," : "(", count[j]);
}
h5tools_str_append(str, ")");
h5tools_str_append(str, "%s", "\n");
@@ -416,7 +416,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace, const h5tool_form
if (block[j] == H5S_UNLIMITED)
h5tools_str_append(str, "%s%s", j ? "," : "(", "H5S_UNLIMITED");
else
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", block[j]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, j ? "," : "(", block[j]);
}
h5tools_str_append(str, ")");
}
@@ -468,11 +468,10 @@ h5tools_str_dump_space_blocks(h5tools_str_t *str, hid_t rspace, const h5tool_for
/* Start coordinates and opposite corner */
for (v = 0; v < ndims; v++)
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : "(", ptdata[u * 2 * ndims + v]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, v ? "," : "(", ptdata[u * 2 * ndims + v]);
for (v = 0; v < ndims; v++)
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : ")-(",
- ptdata[u * 2 * ndims + v + ndims]);
+ h5tools_str_append(str, "%s%" PRIuHSIZE, v ? "," : ")-(", ptdata[u * 2 * ndims + v + ndims]);
h5tools_str_append(str, ")");
}
@@ -527,7 +526,7 @@ h5tools_str_dump_space_points(h5tools_str_t *str, hid_t rspace, const h5tool_for
(unsigned long)u);
for (v = 0; v < ndims; v++)
- h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : "(", (ptdata[u * ndims + v]));
+ h5tools_str_append(str, "%s%" PRIuHSIZE, v ? "," : "(", (ptdata[u * ndims + v]));
h5tools_str_append(str, ")");
}
@@ -1052,7 +1051,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
H5TOOLS_DEBUG("H5T_ENUM");
if (H5Tenum_nameof(type, vp, enum_name, sizeof enum_name) >= 0)
- h5tools_str_append(str, h5tools_escape(enum_name, sizeof(enum_name)));
+ h5tools_str_append(str, "%s", h5tools_escape(enum_name, sizeof(enum_name)));
else {
size_t i;
if (1 == nsize)
@@ -1402,10 +1401,10 @@ h5tools_str_sprint_reference(h5tools_str_t *str, H5R_ref_t *ref_vp)
static char *
h5tools_escape(char *s /*in,out*/, size_t size)
{
- register size_t i;
- const char * escape;
- char octal[8];
- size_t n = HDstrlen(s);
+ size_t i;
+ const char *escape;
+ char octal[8];
+ size_t n = HDstrlen(s);
for (i = 0; i < n; i++) {
switch (s[i]) {
diff --git a/tools/lib/h5tools_str.h b/tools/lib/h5tools_str.h
index eee87c9..f4610f0 100644
--- a/tools/lib/h5tools_str.h
+++ b/tools/lib/h5tools_str.h
@@ -26,7 +26,7 @@ typedef struct h5tools_str_t {
H5TOOLS_DLL void h5tools_str_close(h5tools_str_t *str);
H5TOOLS_DLL size_t h5tools_str_len(h5tools_str_t *str);
-H5TOOLS_DLL char * h5tools_str_append(h5tools_str_t *str, const char *fmt, ...);
+H5TOOLS_DLL char * h5tools_str_append(h5tools_str_t *str, const char *fmt, ...) H5_ATTR_FORMAT(printf, 2, 3);
H5TOOLS_DLL char * h5tools_str_reset(h5tools_str_t *str);
H5TOOLS_DLL char * h5tools_str_trunc(h5tools_str_t *str, size_t size);
H5TOOLS_DLL char * h5tools_str_fmt(h5tools_str_t *str, size_t start, const char *fmt);
diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c
index a5f3317..c572fc0 100644
--- a/tools/lib/h5tools_utils.c
+++ b/tools/lib/h5tools_utils.c
@@ -966,7 +966,7 @@ done:
/*-----------------------------------------------------------
* PURPOSE :
- * Intialize a context which is used as an input to the
+ * Initialize a context which is used as an input to the
* h5tools_get_hyperslab_data() function, which is designed
* to be callable repeatedly until all data from the dataset
* has be retrieved.
@@ -1045,7 +1045,7 @@ h5tools_initialize_hyperslab_context(hid_t dset_id, dataset_context_t **context)
* throws off the starting offsets for higher numbered mpi ranks.
* The following if, else if, end else handle the low mpi
* ranks, followed by the high numbered ranks, and the final
- * else handles the equally distibuted arrays.
+ * else handles the equally distributed arrays.
*/
if ((row_diff > 0) && ((hsize_t)mpi_rank < row_diff)) {
new_context->hs_block[0] += 1;
diff --git a/tools/lib/h5tools_utils.h b/tools/lib/h5tools_utils.h
index 8b29fb9..324c337 100644
--- a/tools/lib/h5tools_utils.h
+++ b/tools/lib/h5tools_utils.h
@@ -135,11 +135,11 @@ H5TOOLS_DLLVAR char * prefix;
/* Definitions of useful routines */
H5TOOLS_DLL void indentation(unsigned);
H5TOOLS_DLL void print_version(const char *progname);
-H5TOOLS_DLL void parallel_print(const char *format, ...);
+H5TOOLS_DLL void parallel_print(const char *format, ...) H5_ATTR_FORMAT(printf, 1, 2);
H5TOOLS_DLL herr_t parse_tuple(const char *start, int sep, char **cpy_out, unsigned *nelems,
char ***ptrs_out);
-H5TOOLS_DLL void error_msg(const char *fmt, ...);
-H5TOOLS_DLL void warn_msg(const char *fmt, ...);
+H5TOOLS_DLL void error_msg(const char *fmt, ...) H5_ATTR_FORMAT(printf, 1, 2);
+H5TOOLS_DLL void warn_msg(const char *fmt, ...) H5_ATTR_FORMAT(printf, 1, 2);
H5TOOLS_DLL void help_ref_msg(FILE *output);
H5TOOLS_DLL void free_table(table_t *table);
#ifdef H5DUMP_DEBUG
diff --git a/tools/lib/h5trav.c b/tools/lib/h5trav.c
index 1f4b0d6..cca36e4 100644
--- a/tools/lib/h5trav.c
+++ b/tools/lib/h5trav.c
@@ -53,11 +53,6 @@ typedef struct trav_path_op_data_t {
const char *path;
} trav_path_op_data_t;
-/* format for hsize_t */
-#ifdef H5TRAV_PRINT_SPACE
-#define HSIZE_T_FORMAT "%" H5_PRINTF_LL_WIDTH "u"
-#endif /* H5TRAV_PRINT_SPACE */
-
/*-------------------------------------------------------------------------
* local functions
*-------------------------------------------------------------------------
@@ -856,7 +851,7 @@ trav_attr(hid_t
/* simple dataspace */
HDprintf(" {");
for (i = 0; i < ndims; i++) {
- HDprintf("%s" HSIZE_T_FORMAT, i ? ", " : "", size[i]);
+ HDprintf("%s%" PRIuHSIZE, i ? ", " : "", size[i]);
}
HDprintf("}\n");
break;
diff --git a/tools/src/h5copy/h5copy.c b/tools/src/h5copy/h5copy.c
index 4e9a25e..eacd05b 100644
--- a/tools/src/h5copy/h5copy.c
+++ b/tools/src/h5copy/h5copy.c
@@ -19,7 +19,7 @@
#define PROGRAMNAME "h5copy"
/* command-line options: short and long-named parameters */
-static const char * s_opts = "d:f:hi:o:ps:vVE";
+static const char * s_opts = "d:f:hi:o:ps:vVE*";
static struct h5_long_options l_opts[] = {{"destination", require_arg, 'd'},
{"flag", require_arg, 'f'},
{"help", no_arg, 'h'},
@@ -29,7 +29,7 @@ static struct h5_long_options l_opts[] = {{"destination", require_arg, 'd'},
{"source", require_arg, 's'},
{"verbose", no_arg, 'v'},
{"version", no_arg, 'V'},
- {"enable-error-stack", no_arg, 'E'},
+ {"enable-error-stack", optional_arg, 'E'},
{NULL, 0, '\0'}};
char * fname_src = NULL;
char * fname_dst = NULL;
@@ -93,15 +93,17 @@ usage(void)
PRINTVALSTREAM(rawoutstream, " -o, --output output file name\n");
PRINTVALSTREAM(rawoutstream, " -s, --source source object name\n");
PRINTVALSTREAM(rawoutstream, " -d, --destination destination object name\n");
+ PRINTVALSTREAM(rawoutstream, " ERROR\n");
+ PRINTVALSTREAM(rawoutstream,
+ " --enable-error-stack Prints messages from the HDF5 error stack as they occur.\n");
+ PRINTVALSTREAM(rawoutstream,
+ " Optional value 2 also prints file open errors.\n");
PRINTVALSTREAM(rawoutstream, " OPTIONS\n");
PRINTVALSTREAM(rawoutstream, " -h, --help Print a usage message and exit\n");
PRINTVALSTREAM(rawoutstream,
" -p, --parents No error if existing, make parent groups as needed\n");
PRINTVALSTREAM(rawoutstream, " -v, --verbose Print information about OBJECTS and OPTIONS\n");
PRINTVALSTREAM(rawoutstream, " -V, --version Print version number and exit\n");
- PRINTVALSTREAM(rawoutstream, " --enable-error-stack\n");
- PRINTVALSTREAM(rawoutstream,
- " Prints messages from the HDF5 error stack as they occur.\n");
PRINTVALSTREAM(rawoutstream, " -f, --flag Flag type\n\n");
PRINTVALSTREAM(rawoutstream, " Flag type is one of the following strings:\n\n");
PRINTVALSTREAM(rawoutstream, " shallow Copy only immediate members for groups\n\n");
@@ -212,7 +214,7 @@ parse_flag(const char *s_flag, unsigned *flag)
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid_src = H5I_INVALID_HID;
hid_t fid_dst = H5I_INVALID_HID;
@@ -242,7 +244,7 @@ main(int argc, const char *argv[])
} /* end if */
/* parse command line options */
- while ((opt = H5_get_option(argc, argv, s_opts, l_opts)) != EOF) {
+ while ((opt = H5_get_option(argc, (const char *const *)argv, s_opts, l_opts)) != EOF) {
switch ((char)opt) {
case 'd':
oname_dst = HDstrdup(H5_optarg);
@@ -288,7 +290,10 @@ main(int argc, const char *argv[])
break;
case 'E':
- enable_error_stack = 1;
+ if (H5_optarg != NULL)
+ enable_error_stack = HDatoi(H5_optarg);
+ else
+ enable_error_stack = 1;
break;
default:
diff --git a/tools/src/h5diff/h5diff_common.c b/tools/src/h5diff/h5diff_common.c
index 9dcd91b..98ac8f7 100644
--- a/tools/src/h5diff/h5diff_common.c
+++ b/tools/src/h5diff/h5diff_common.c
@@ -25,7 +25,7 @@ static int check_d_input(const char *);
* Command-line options: The user can specify short or long-named
* parameters.
*/
-static const char * s_opts = "hVrv*qn:d:p:NcelxE:A:S";
+static const char * s_opts = "hVrv*qn:d:p:NcelxE:A:S*";
static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'},
{"version", no_arg, 'V'},
{"report", no_arg, 'r'},
@@ -41,7 +41,7 @@ static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'},
{"no-dangling-links", no_arg, 'x'},
{"exclude-path", require_arg, 'E'},
{"exclude-attribute", require_arg, 'A'},
- {"enable-error-stack", no_arg, 'S'},
+ {"enable-error-stack", optional_arg, 'S'},
{"vol-value-1", require_arg, '1'},
{"vol-name-1", require_arg, '2'},
{"vol-info-1", require_arg, '3'},
@@ -217,7 +217,7 @@ parse_subset_params(const char *dset)
*/
void
-parse_command_line(int argc, const char *argv[], const char **fname1, const char **fname2,
+parse_command_line(int argc, const char *const *argv, const char **fname1, const char **fname2,
const char **objname1, const char **objname2, diff_opt_t *opts)
{
int i;
@@ -311,7 +311,10 @@ parse_command_line(int argc, const char *argv[], const char **fname1, const char
break;
case 'S':
- enable_error_stack = 1;
+ if (H5_optarg != NULL)
+ enable_error_stack = HDatoi(H5_optarg);
+ else
+ enable_error_stack = 1;
break;
case 'E':
@@ -705,6 +708,10 @@ usage(void)
PRINTVALSTREAM(rawoutstream, " [obj1] Name of an HDF5 object, in absolute path\n");
PRINTVALSTREAM(rawoutstream, " [obj2] Name of an HDF5 object, in absolute path\n");
PRINTVALSTREAM(rawoutstream, "\n");
+ PRINTVALSTREAM(rawoutstream, " ERROR\n");
+ PRINTVALSTREAM(rawoutstream,
+ " --enable-error-stack Prints messages from the HDF5 error stack as they occur.\n");
+ PRINTVALSTREAM(rawoutstream, " Optional value 2 also prints file open errors.\n");
PRINTVALSTREAM(rawoutstream, " OPTIONS\n");
PRINTVALSTREAM(rawoutstream, " -h, --help\n");
PRINTVALSTREAM(rawoutstream, " Print a usage message and exit.\n");
@@ -727,9 +734,6 @@ usage(void)
PRINTVALSTREAM(rawoutstream, " 3 : All level 2 information plus file names.\n");
PRINTVALSTREAM(rawoutstream, " -q, --quiet\n");
PRINTVALSTREAM(rawoutstream, " Quiet mode. Do not produce output.\n");
- PRINTVALSTREAM(rawoutstream, " --enable-error-stack\n");
- PRINTVALSTREAM(rawoutstream,
- " Prints messages from the HDF5 error stack as they occur.\n");
PRINTVALSTREAM(rawoutstream,
" --vol-value-1 Value (ID) of the VOL connector to use for opening the\n");
PRINTVALSTREAM(rawoutstream, " first HDF5 file specified\n");
diff --git a/tools/src/h5diff/h5diff_common.h b/tools/src/h5diff/h5diff_common.h
index 83f4255..35e5dfb 100644
--- a/tools/src/h5diff/h5diff_common.h
+++ b/tools/src/h5diff/h5diff_common.h
@@ -23,7 +23,7 @@ extern "C" {
#endif
void usage(void);
-void parse_command_line(int argc, const char *argv[], const char **fname1, const char **fname2,
+void parse_command_line(int argc, const char *const *argv, const char **fname1, const char **fname2,
const char **objname1, const char **objname2, diff_opt_t *opts);
void h5diff_exit(int status);
void print_info(diff_opt_t *opts);
diff --git a/tools/src/h5diff/h5diff_main.c b/tools/src/h5diff/h5diff_main.c
index 37c215b..2a43b56 100644
--- a/tools/src/h5diff/h5diff_main.c
+++ b/tools/src/h5diff/h5diff_main.c
@@ -65,7 +65,7 @@
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int ret;
int i;
@@ -86,7 +86,7 @@ main(int argc, const char *argv[])
* process the command-line
*-------------------------------------------------------------------------
*/
- parse_command_line(argc, argv, &fname1, &fname2, &objname1, &objname2, &opts);
+ parse_command_line(argc, (const char *const *)argv, &fname1, &fname2, &objname1, &objname2, &opts);
/* enable error reporting if command line option */
h5tools_error_report();
diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c
index af6a29a..e93c1cd 100644
--- a/tools/src/h5diff/ph5diff_main.c
+++ b/tools/src/h5diff/ph5diff_main.c
@@ -46,7 +46,7 @@
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
const char *fname1 = NULL;
const char *fname2 = NULL;
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index a57b0e3..d82ea94 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -165,6 +165,12 @@ usage(const char *prog)
PRINTVALSTREAM(rawoutstream, " OPTIONS\n");
PRINTVALSTREAM(rawoutstream, " -h, --help Print a usage message and exit\n");
PRINTVALSTREAM(rawoutstream, " -V, --version Print version number and exit\n");
+ PRINTVALSTREAM(rawoutstream, "--------------- Error Options ---------------\n");
+ PRINTVALSTREAM(rawoutstream,
+ " --enable-error-stack Prints messages from the HDF5 error stack as they occur.\n");
+ PRINTVALSTREAM(rawoutstream,
+ " Optional value 2 also prints file open errors.\n");
+ PRINTVALSTREAM(rawoutstream, " Default setting disables any error reporting.\n");
PRINTVALSTREAM(rawoutstream, "--------------- File Options ---------------\n");
PRINTVALSTREAM(rawoutstream, " -n, --contents Print a list of the file contents and exit\n");
PRINTVALSTREAM(rawoutstream, " Optional value 1 also prints attributes.\n");
@@ -254,10 +260,6 @@ usage(const char *prog)
PRINTVALSTREAM(rawoutstream, " -q Q, --sort_by=Q Sort groups and attributes by index Q\n");
PRINTVALSTREAM(rawoutstream, " -z Z, --sort_order=Z Sort groups and attributes by order Z\n");
PRINTVALSTREAM(rawoutstream,
- " --enable-error-stack Prints messages from the HDF5 error stack as they occur.\n");
- PRINTVALSTREAM(rawoutstream,
- " Optional value 2 also prints file open errors.\n");
- PRINTVALSTREAM(rawoutstream,
" --no-compact-subset Disable compact form of subsetting and allow the use\n");
PRINTVALSTREAM(rawoutstream, " of \"[\" in dataset names.\n");
PRINTVALSTREAM(rawoutstream,
@@ -828,7 +830,7 @@ free_handler(struct handler_t *hand, int len)
*-------------------------------------------------------------------------
*/
static struct handler_t *
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
struct handler_t *hand = NULL;
struct handler_t *last_dset = NULL;
@@ -1329,7 +1331,7 @@ error:
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid = H5I_INVALID_HID;
hid_t gid = H5I_INVALID_HID;
@@ -1349,7 +1351,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- if ((hand = parse_command_line(argc, argv)) == NULL) {
+ if ((hand = parse_command_line(argc, (const char *const *)argv)) == NULL) {
goto done;
}
diff --git a/tools/src/h5dump/h5dump_ddl.c b/tools/src/h5dump/h5dump_ddl.c
index 8002ec2..b0bce0c 100644
--- a/tools/src/h5dump/h5dump_ddl.c
+++ b/tools/src/h5dump/h5dump_ddl.c
@@ -838,7 +838,7 @@ dump_group(hid_t gid, const char *name)
type = H5Dget_type(dset);
H5Otoken_to_str(dset, &type_table->objs[u].obj_token, &obj_tok_str);
- HDsprintf(type_name, "#%s", obj_tok_str);
+ HDsnprintf(type_name, sizeof(type_name), "#%s", obj_tok_str);
H5free_memory(obj_tok_str);
dump_function_table->dump_named_datatype_function(type, type_name);
@@ -1318,7 +1318,7 @@ attr_search(hid_t oid, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *a
j = (int)HDstrlen(op_name) - 1;
/* find the last / */
while (j >= 0) {
- if (op_name[j] == '/' && (j == 0 || (j > 0 && op_name[j - 1] != '\\')))
+ if (op_name[j] == '/' && (j == 0 || (op_name[j - 1] != '\\')))
break;
j--;
}
@@ -1541,7 +1541,7 @@ handle_attributes(hid_t fid, const char *attr, void H5_ATTR_UNUSED *data, int H5
/* find the last / */
while (j >= 0) {
- if (attr[j] == '/' && (j == 0 || (j > 0 && attr[j - 1] != '\\')))
+ if (attr[j] == '/' && (j == 0 || (attr[j - 1] != '\\')))
break;
j--;
}
@@ -1970,7 +1970,7 @@ handle_datatypes(hid_t fid, const char *type, void H5_ATTR_UNUSED *data, int pe,
/* unnamed datatype */
H5Otoken_to_str(fid, &type_table->objs[idx].obj_token, &obj_tok_str);
- HDsprintf(name, "/#%s", obj_tok_str);
+ HDsnprintf(name, sizeof(name), "/#%s", obj_tok_str);
H5free_memory(obj_tok_str);
if (!HDstrcmp(name, real_name))
diff --git a/tools/src/h5dump/h5dump_xml.c b/tools/src/h5dump/h5dump_xml.c
index 8fec49c..827daf3 100644
--- a/tools/src/h5dump/h5dump_xml.c
+++ b/tools/src/h5dump/h5dump_xml.c
@@ -591,6 +591,8 @@ xml_name_to_XID(hid_t loc_id, const char *str, char *outstr, int outlen, int gen
if (outlen < 22)
return 1;
+ H5_CHECK_OVERFLOW(outlen, int, size_t);
+
lookup_ret = ref_path_table_lookup(str, &obj_token);
if (lookup_ret < 0) {
if (HDstrlen(str) == 0) {
@@ -600,7 +602,7 @@ xml_name_to_XID(hid_t loc_id, const char *str, char *outstr, int outlen, int gen
ref_path_table_gen_fake(str, &obj_token);
H5Otoken_to_str(loc_id, &obj_token, &obj_tok_str);
- HDsprintf(outstr, "xid_%s", obj_tok_str);
+ HDsnprintf(outstr, (size_t)outlen, "xid_%s", obj_tok_str);
H5free_memory(obj_tok_str);
return 0;
@@ -615,7 +617,7 @@ xml_name_to_XID(hid_t loc_id, const char *str, char *outstr, int outlen, int gen
ref_path_table_gen_fake(str, &obj_token);
H5Otoken_to_str(loc_id, &obj_token, &obj_tok_str);
- HDsprintf(outstr, "xid_%s", obj_tok_str);
+ HDsnprintf(outstr, (size_t)outlen, "xid_%s", obj_tok_str);
H5free_memory(obj_tok_str);
return 0;
@@ -627,7 +629,7 @@ xml_name_to_XID(hid_t loc_id, const char *str, char *outstr, int outlen, int gen
}
H5Otoken_to_str(loc_id, &obj_token, &obj_tok_str);
- HDsprintf(outstr, "xid_%s", obj_tok_str);
+ HDsnprintf(outstr, (size_t)outlen, "xid_%s", obj_tok_str);
H5free_memory(obj_tok_str);
return 0;
@@ -1777,8 +1779,7 @@ xml_dump_dataspace(hid_t space)
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer,
- "<%sDimension DimSize=\"%" H5_PRINTF_LL_WIDTH
- "u\" MaxDimSize=\"UNLIMITED\"/>",
+ "<%sDimension DimSize=\"%" PRIuHSIZE "\" MaxDimSize=\"UNLIMITED\"/>",
xmlnsprefix, size[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
(size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
@@ -1788,10 +1789,9 @@ xml_dump_dataspace(hid_t space)
/* Render the element */
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer,
- "<%sDimension DimSize=\"%" H5_PRINTF_LL_WIDTH
- "u\" MaxDimSize=\"%" H5_PRINTF_LL_WIDTH "u\"/>",
- xmlnsprefix, size[i], size[i]);
+ h5tools_str_append(
+ &buffer, "<%sDimension DimSize=\"%" PRIuHSIZE "\" MaxDimSize=\"%" PRIuHSIZE "\"/>",
+ xmlnsprefix, size[i], size[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
(size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
}
@@ -1800,10 +1800,9 @@ xml_dump_dataspace(hid_t space)
/* Render the element */
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer,
- "<%sDimension DimSize=\"%" H5_PRINTF_LL_WIDTH
- "u\" MaxDimSize=\"%" H5_PRINTF_LL_WIDTH "u\"/>",
- xmlnsprefix, size[i], maxsize[i]);
+ h5tools_str_append(
+ &buffer, "<%sDimension DimSize=\"%" PRIuHSIZE "\" MaxDimSize=\"%" PRIuHSIZE "\"/>",
+ xmlnsprefix, size[i], maxsize[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
(size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
}
@@ -2802,7 +2801,7 @@ xml_dump_group(hid_t gid, const char *name)
type = H5Dget_type(dset);
H5Otoken_to_str(dset, &type_table->objs[u].obj_token, &obj_tok_str);
- HDsprintf(type_name, "#%s", obj_tok_str);
+ HDsnprintf(type_name, sizeof(type_name), "#%s", obj_tok_str);
H5free_memory(obj_tok_str);
dump_function_table->dump_named_datatype_function(type, type_name);
@@ -2895,7 +2894,7 @@ xml_dump_group(hid_t gid, const char *name)
type = H5Dget_type(dset);
H5Otoken_to_str(dset, &type_table->objs[u].obj_token, &obj_tok_str);
- HDsprintf(type_name, "#%s", obj_tok_str);
+ HDsnprintf(type_name, sizeof(type_name), "#%s", obj_tok_str);
H5free_memory(obj_tok_str);
dump_function_table->dump_named_datatype_function(type, type_name);
@@ -3598,7 +3597,10 @@ xml_dump_fill_value(hid_t dcpl, hid_t type)
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "\"");
for (i = 0; i < sz; i++) {
- h5tools_str_append(&buffer, "%x ", *(unsigned int *)buf + (i * sizeof(unsigned int)));
+ unsigned long val = *(unsigned int *)buf + (i * sizeof(unsigned int));
+
+ H5_CHECK_OVERFLOW(val, unsigned long, unsigned);
+ h5tools_str_append(&buffer, "%x ", (unsigned)val);
}
h5tools_str_append(&buffer, "\"");
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
@@ -3892,8 +3894,8 @@ xml_dump_dataset(hid_t did, const char *name, struct subset_t H5_ATTR_UNUSED *ss
/* Render the element */
h5tools_str_reset(&buffer);
- h5tools_str_append(&buffer, "<%sChunkDimension DimSize=\"%" H5_PRINTF_LL_WIDTH "u\" />",
- xmlnsprefix, chsize[i]);
+ h5tools_str_append(&buffer, "<%sChunkDimension DimSize=\"%" PRIuHSIZE "\" />", xmlnsprefix,
+ chsize[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
(size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
}
@@ -4524,12 +4526,16 @@ xml_print_enum(hid_t type)
h5tools_str_append(&buffer, "%02x", value[i * dst_size + j]);
}
else if (H5T_SGN_NONE == H5Tget_sign(native)) {
- h5tools_str_append(&buffer, "%" H5_PRINTF_LL_WIDTH "u",
- *((unsigned long long *)((void *)(value + i * dst_size))));
+ unsigned long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(&buffer, "%llu", copy);
}
else {
- h5tools_str_append(&buffer, "%" H5_PRINTF_LL_WIDTH "d",
- *((long long *)((void *)(value + i * dst_size))));
+ long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(&buffer, "%lld", copy);
}
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos,
(size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
diff --git a/tools/src/h5format_convert/h5format_convert.c b/tools/src/h5format_convert/h5format_convert.c
index ddf129c..50d0f5d 100644
--- a/tools/src/h5format_convert/h5format_convert.c
+++ b/tools/src/h5format_convert/h5format_convert.c
@@ -94,7 +94,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char **argv)
+parse_command_line(int argc, const char *const *argv)
{
int opt;
@@ -127,7 +127,7 @@ parse_command_line(int argc, const char **argv)
dname_g = HDstrdup(H5_optarg);
if (dname_g == NULL) {
h5tools_setstatus(EXIT_FAILURE);
- error_msg("No dataset name\n", H5_optarg);
+ error_msg("No dataset name `%s`\n", H5_optarg);
usage(h5tools_getprogname());
goto error;
}
@@ -383,7 +383,7 @@ error:
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid = H5I_INVALID_HID;
@@ -394,7 +394,7 @@ main(int argc, const char *argv[])
h5tools_init();
/* Parse command line options */
- if (parse_command_line(argc, argv) < 0)
+ if (parse_command_line(argc, (const char *const *)argv) < 0)
goto done;
else if (verbose_g)
HDfprintf(stdout, "Process command line options\n");
diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c
index 31fa7cf..317db81 100644
--- a/tools/src/h5import/h5import.c
+++ b/tools/src/h5import/h5import.c
@@ -3784,7 +3784,7 @@ setDefaultValues(struct Input *in, int count)
in->path.count = 1;
HDstrcpy(temp, "dataset");
- HDsprintf(num, "%d", count);
+ HDsnprintf(num, sizeof(num), "%d", count);
HDstrcat(temp, num);
HDstrcpy(in->path.group[0], temp);
diff --git a/tools/src/h5jam/h5jam.c b/tools/src/h5jam/h5jam.c
index 07140b5..45978bb 100644
--- a/tools/src/h5jam/h5jam.c
+++ b/tools/src/h5jam/h5jam.c
@@ -22,7 +22,7 @@
herr_t write_pad(int ofile, hsize_t old_where, hsize_t *new_where);
hsize_t compute_user_block_size(hsize_t);
hsize_t copy_some_to_file(int, int, hsize_t, hsize_t, ssize_t);
-void parse_command_line(int, const char *[]);
+void parse_command_line(int, const char *const *);
int do_clobber = FALSE;
char *output_file = NULL;
@@ -104,7 +104,7 @@ leave(int ret)
*/
void
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt = FALSE;
@@ -149,7 +149,7 @@ parse_command_line(int argc, const char *argv[])
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int ufid = -1;
int h5fid = -1;
@@ -174,7 +174,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- parse_command_line(argc, argv);
+ parse_command_line(argc, (const char *const *)argv);
/* enable error reporting if command line option */
h5tools_error_report();
diff --git a/tools/src/h5jam/h5unjam.c b/tools/src/h5jam/h5unjam.c
index fa23b06..8ff354e 100644
--- a/tools/src/h5jam/h5unjam.c
+++ b/tools/src/h5jam/h5unjam.c
@@ -92,7 +92,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt = FALSE;
@@ -172,7 +172,7 @@ leave(int ret)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t ifile = H5I_INVALID_HID;
hid_t plist = H5I_INVALID_HID;
@@ -189,7 +189,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- if (EXIT_FAILURE == parse_command_line(argc, argv))
+ if (EXIT_FAILURE == parse_command_line(argc, (const char *const *)argv))
goto done;
/* enable error reporting if command line option */
diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c
index 17abf43..c30c08f 100644
--- a/tools/src/h5ls/h5ls.c
+++ b/tools/src/h5ls/h5ls.c
@@ -78,9 +78,9 @@ static h5tool_format_t ls_dataformat = {
",", /*elmt_suf1 */
" ", /*elmt_suf2 */
- HSIZE_T_FORMAT, /*idx_n_fmt */
- ",", /*idx_sep */
- "(%s)", /*idx_fmt */
+ "%" PRIuHSIZE, /*idx_n_fmt */
+ ",", /*idx_sep */
+ "(%s)", /*idx_fmt */
65535,
/*line_ncols */ /*standard default columns */
@@ -942,8 +942,7 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind)
/* Print members */
for (i = 0; i < (unsigned)nmembs; i++) {
- unsigned char *copy; /* a pointer to value array */
- int nchars; /* number of output characters */
+ int nchars; /* number of output characters */
h5tools_str_append(buffer, "\n%*s", ind + 4, "");
nchars = print_string(buffer, name[i], TRUE);
@@ -957,16 +956,16 @@ print_enum_type(h5tools_str_t *buffer, hid_t type, int ind)
h5tools_str_append(buffer, "%02x", value[i * dst_size + j]);
}
else if (H5T_SGN_NONE == H5Tget_sign(native)) {
- /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size"
- *strangely, unless use another pointer "copy".*/
- copy = value + i * dst_size;
- h5tools_str_append(buffer, HSIZE_T_FORMAT, *((unsigned long long *)((void *)copy)));
+ unsigned long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(buffer, "%llu", copy);
}
else {
- /*On SGI Altix(cobalt), wrong values were printed out with "value+i*dst_size"
- *strangely, unless use another pointer "copy".*/
- copy = value + i * dst_size;
- h5tools_str_append(buffer, "%lld", *((long long *)((void *)copy)));
+ long long copy;
+
+ HDmemcpy(&copy, value + i * dst_size, sizeof(copy));
+ h5tools_str_append(buffer, "%lld", copy);
}
}
@@ -1185,13 +1184,13 @@ print_array_type(h5tools_str_t *buffer, hid_t type, int ind)
/* Print dimensions */
for (i = 0; i < ndims; i++)
- h5tools_str_append(buffer, "%s" HSIZE_T_FORMAT, i ? "," : "[", dims[i]);
+ h5tools_str_append(buffer, "%s%" PRIuHSIZE, i ? "," : "[", dims[i]);
h5tools_str_append(buffer, "]");
HDfree(dims);
}
else
- h5tools_str_append(buffer, " [SCALAR]\n", rawoutstream);
+ h5tools_str_append(buffer, " [SCALAR]\n");
/* Print parent type */
h5tools_str_append(buffer, " ");
@@ -1701,7 +1700,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain
/* simple dataspace */
h5tools_str_append(&buffer, " {");
for (i = 0; i < ndims; i++) {
- h5tools_str_append(&buffer, "%s" HSIZE_T_FORMAT, i ? ", " : "", size[i]);
+ h5tools_str_append(&buffer, "%s%" PRIuHSIZE, i ? ", " : "", size[i]);
nelmts *= size[i];
}
h5tools_str_append(&buffer, "}\n");
@@ -1789,12 +1788,12 @@ dataset_list1(hid_t dset)
ndims = H5Sget_simple_extent_dims(space, cur_size, max_size);
h5tools_str_append(&buffer, " {");
for (i = 0; i < ndims; i++) {
- h5tools_str_append(&buffer, "%s" HSIZE_T_FORMAT, i ? ", " : "", cur_size[i]);
+ h5tools_str_append(&buffer, "%s%" PRIuHSIZE, i ? ", " : "", cur_size[i]);
if (max_size[i] == H5S_UNLIMITED) {
h5tools_str_append(&buffer, "/%s", "Inf");
}
else if (max_size[i] != cur_size[i] || verbose_g > 0) {
- h5tools_str_append(&buffer, "/" HSIZE_T_FORMAT, max_size[i]);
+ h5tools_str_append(&buffer, "/%" PRIuHSIZE, max_size[i]);
}
}
if (space_type == H5S_SCALAR)
@@ -1868,10 +1867,10 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name)
h5tools_str_append(&buffer, " %-10s {", "Chunks:");
total = H5Tget_size(type);
for (i = 0; i < ndims; i++) {
- h5tools_str_append(&buffer, "%s" HSIZE_T_FORMAT, i ? ", " : "", chsize[i]);
+ h5tools_str_append(&buffer, "%s%" PRIuHSIZE, i ? ", " : "", chsize[i]);
total *= chsize[i];
}
- h5tools_str_append(&buffer, "} " HSIZE_T_FORMAT " bytes\n", total);
+ h5tools_str_append(&buffer, "} %" PRIuHSIZE " bytes\n", total);
} break;
case H5D_COMPACT:
break;
@@ -1897,15 +1896,13 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name)
if (H5Pget_external(dcpl, (unsigned)i, sizeof(f_name), f_name, &f_offset, &f_size) <
0) {
h5tools_str_append(
- &buffer,
- " #%03d %10" H5_PRINTF_LL_WIDTH "u %10s %10s ***ERROR*** %s\n", i,
+ &buffer, " #%03d %10" PRIuHSIZE " %10s %10s ***ERROR*** %s\n", i,
total, "", "", i + 1 < nf ? "Following addresses are incorrect" : "");
}
else if (H5S_UNLIMITED == f_size) {
h5tools_str_append(&buffer,
- " #%03d %10" H5_PRINTF_LL_WIDTH
- "u %10" H5_PRINTF_LL_WIDTH "u %10s ",
- i, total, (hsize_t)f_offset, "INF");
+ " #%03d %10" PRIuHSIZE " %10" PRIuHSIZE " %10s ", i,
+ total, (hsize_t)f_offset, "INF");
print_string(&buffer, f_name, TRUE);
}
else {
@@ -1985,8 +1982,7 @@ dataset_list2(hid_t dset, const char H5_ATTR_UNUSED *name)
case H5T_ARRAY:
case H5T_NCLASSES:
default:
- h5tools_str_append(&buffer,
- HSIZE_T_FORMAT " logical byte%s, " HSIZE_T_FORMAT " allocated byte%s",
+ h5tools_str_append(&buffer, "%" PRIuHSIZE " logical byte%s, %" PRIuHSIZE " allocated byte%s",
total, 1 == total ? "" : "s", used, 1 == used ? "" : "s");
if (used > 0) {
utilization = ((double)total * 100.0) / (double)used;
@@ -2303,7 +2299,7 @@ list_lnk(const char *name, const H5L_info2_t *linfo, void *_iter)
iter->symlink_list->dangle_link = TRUE;
h5tools_str_append(&buffer, "Soft Link {");
- h5tools_str_append(&buffer, buf);
+ h5tools_str_append(&buffer, "%s", buf);
h5tools_str_append(&buffer, "}");
h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols,
(hsize_t)0, (hsize_t)0);
@@ -2371,11 +2367,11 @@ list_lnk(const char *name, const H5L_info2_t *linfo, void *_iter)
goto done;
h5tools_str_append(&buffer, "External Link {");
- h5tools_str_append(&buffer, filename);
+ h5tools_str_append(&buffer, "%s", filename);
h5tools_str_append(&buffer, "/");
if (*path != '/')
h5tools_str_append(&buffer, "/");
- h5tools_str_append(&buffer, path);
+ h5tools_str_append(&buffer, "%s", path);
h5tools_str_append(&buffer, "}");
h5tools_render_element(rawoutstream, info, &ctx, &buffer, &curr_pos, (size_t)info->line_ncols,
(hsize_t)0, (hsize_t)0);
@@ -2526,7 +2522,7 @@ done:
* were borrowed from the GNU less(1).
*
* Return: Success: Number of columns.
- * Failure: Some default number of columms.
+ * Failure: Some default number of columns.
*-------------------------------------------------------------------------
*/
static int
@@ -2646,7 +2642,7 @@ leave(int ret)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t file_id = H5I_INVALID_HID;
char * fname = NULL, *oname = NULL, *x = NULL;
@@ -2722,9 +2718,9 @@ main(int argc, const char *argv[])
}
else if (!HDstrcmp(argv[argno], "--enable-error-stack")) {
enable_error_stack = 1;
- /* deprecated --errors */
}
else if (!HDstrcmp(argv[argno], "--errors")) {
+ /* deprecated --errors */
enable_error_stack = 1;
}
else if (!HDstrcmp(argv[argno], "--follow-symlinks")) {
diff --git a/tools/src/h5perf/pio_engine.c b/tools/src/h5perf/pio_engine.c
index 3edc74a..1baaca2 100644
--- a/tools/src/h5perf/pio_engine.c
+++ b/tools/src/h5perf/pio_engine.c
@@ -282,7 +282,7 @@ do_pio(parameters param)
/* Open file for write */
char base_name[256];
- HDsprintf(base_name, "#pio_tmp_%lu", nf);
+ HDsnprintf(base_name, sizeof(base_name), "#pio_tmp_%lu", nf);
pio_create_filename(iot, base_name, fname, sizeof(fname));
if (pio_debug_level > 0)
HDfprintf(output, "rank %d: data filename=%s\n", pio_mpi_rank_g, fname);
@@ -420,7 +420,7 @@ pio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
/* If the prefix specifies the HDF5_PARAPREFIX directory, then
* default to using the "/tmp/$USER" or "/tmp/$LOGIN"
* directory instead. */
- register char *user, *login, *subdir;
+ char *user, *login, *subdir;
user = HDgetenv("USER");
login = HDgetenv("LOGIN");
@@ -898,7 +898,7 @@ do_write(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nby
} /* end if */
} /* end else */
- HDsprintf(dname, "Dataset_%ld", ndset);
+ HDsnprintf(dname, sizeof(dname), "Dataset_%ld", ndset);
h5ds_id = H5DCREATE(fd->h5fd, dname, ELMT_H5_TYPE, h5dset_space_id, h5dcpl);
if (h5ds_id < 0) {
@@ -1879,7 +1879,7 @@ do_read(results *res, file_descr *fd, parameters *parms, long ndsets, off_t nbyt
break;
case PHDF5:
- HDsprintf(dname, "Dataset_%ld", ndset);
+ HDsnprintf(dname, sizeof(dname), "Dataset_%ld", ndset);
h5ds_id = H5DOPEN(fd->h5fd, dname);
if (h5ds_id < 0) {
HDfprintf(stderr, "HDF5 Dataset open failed\n");
diff --git a/tools/src/h5perf/pio_perf.c b/tools/src/h5perf/pio_perf.c
index bf5f62f..003c4f2 100644
--- a/tools/src/h5perf/pio_perf.c
+++ b/tools/src/h5perf/pio_perf.c
@@ -188,7 +188,7 @@ typedef struct _minmax {
/* local functions */
static off_t parse_size_directive(const char *size);
-static struct options *parse_command_line(int argc, char *argv[]);
+static struct options *parse_command_line(int argc, const char *const *argv);
static void run_test_loop(struct options *options);
static int run_test(iotype iot, parameters parms, struct options *opts);
static void output_all_info(minmax *mm, int count, int indent_level);
@@ -200,7 +200,7 @@ static void output_results(const struct options *options, const char *name, min
off_t data_size);
static void output_times(const struct options *options, const char *name, minmax *table, int table_size);
static void output_report(const char *fmt, ...);
-static void print_indent(register int indent);
+static void print_indent(int indent);
static void usage(const char *prog);
static void report_parameters(struct options *opts);
static off_t squareo(off_t);
@@ -260,7 +260,7 @@ main(int argc, char *argv[])
pio_comm_g = MPI_COMM_WORLD;
h5_set_info_object();
- opts = parse_command_line(argc, argv);
+ opts = parse_command_line(argc, (const char *const *)argv);
if (!opts) {
exit_value = EXIT_FAILURE;
@@ -336,7 +336,7 @@ run_test_loop(struct options *opts)
/* start with max_num_procs and decrement it by half for each loop. */
/* if performance needs restart, fewer processes may be needed. */
for (num_procs = opts->max_num_procs; num_procs >= opts->min_num_procs; num_procs >>= 1) {
- register size_t buf_size;
+ size_t buf_size;
parms.num_procs = num_procs;
@@ -411,34 +411,34 @@ run_test_loop(struct options *opts)
static int
run_test(iotype iot, parameters parms, struct options *opts)
{
- results res;
- register int i, ret_value = SUCCESS;
- int comm_size;
- off_t raw_size;
- minmax * write_mpi_mm_table = NULL;
- minmax * write_mm_table = NULL;
- minmax * write_gross_mm_table = NULL;
- minmax * write_raw_mm_table = NULL;
- minmax * read_mpi_mm_table = NULL;
- minmax * read_mm_table = NULL;
- minmax * read_gross_mm_table = NULL;
- minmax * read_raw_mm_table = NULL;
- minmax * read_open_mm_table = NULL;
- minmax * read_close_mm_table = NULL;
- minmax * write_open_mm_table = NULL;
- minmax * write_close_mm_table = NULL;
- minmax write_mpi_mm = {0.0, 0.0, 0.0, 0};
- minmax write_mm = {0.0, 0.0, 0.0, 0};
- minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
- minmax write_raw_mm = {0.0, 0.0, 0.0, 0};
- minmax read_mpi_mm = {0.0, 0.0, 0.0, 0};
- minmax read_mm = {0.0, 0.0, 0.0, 0};
- minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
- minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
- minmax read_open_mm = {0.0, 0.0, 0.0, 0};
- minmax read_close_mm = {0.0, 0.0, 0.0, 0};
- minmax write_open_mm = {0.0, 0.0, 0.0, 0};
- minmax write_close_mm = {0.0, 0.0, 0.0, 0};
+ results res;
+ int i, ret_value = SUCCESS;
+ int comm_size;
+ off_t raw_size;
+ minmax *write_mpi_mm_table = NULL;
+ minmax *write_mm_table = NULL;
+ minmax *write_gross_mm_table = NULL;
+ minmax *write_raw_mm_table = NULL;
+ minmax *read_mpi_mm_table = NULL;
+ minmax *read_mm_table = NULL;
+ minmax *read_gross_mm_table = NULL;
+ minmax *read_raw_mm_table = NULL;
+ minmax *read_open_mm_table = NULL;
+ minmax *read_close_mm_table = NULL;
+ minmax *write_open_mm_table = NULL;
+ minmax *write_close_mm_table = NULL;
+ minmax write_mpi_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_raw_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_mpi_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_close_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_open_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_close_mm = {0.0, 0.0, 0.0, 0};
raw_size = parms.num_files * (off_t)parms.num_dsets * (off_t)parms.num_bytes;
parms.io_type = iot;
@@ -1098,10 +1098,9 @@ output_report(const char *fmt, ...)
* things.
* Return: Nothing
* Programmer: Bill Wendling, 29. October 2001
- * Modifications:
*/
static void
-print_indent(register int indent)
+print_indent(int indent)
{
int myrank;
@@ -1276,9 +1275,9 @@ report_parameters(struct options *opts)
* Added 2D testing (Christian Chilan, 10. August 2005)
*/
static struct options *
-parse_command_line(int argc, char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
- register int opt;
+ int opt;
struct options *cl_opts;
cl_opts = (struct options *)malloc(sizeof(struct options));
@@ -1305,7 +1304,7 @@ parse_command_line(int argc, char *argv[])
cl_opts->h5_write_only = FALSE; /* Do both read and write by default */
cl_opts->verify = FALSE; /* No Verify data correctness by default */
- while ((opt = H5_get_option(argc, (const char **)argv, s_opts, l_opts)) != EOF) {
+ while ((opt = H5_get_option(argc, argv, s_opts, l_opts)) != EOF) {
switch ((char)opt) {
case 'a':
cl_opts->h5_alignment = parse_size_directive(H5_optarg);
diff --git a/tools/src/h5perf/sio_engine.c b/tools/src/h5perf/sio_engine.c
index 98cb920..bdd2c08 100644
--- a/tools/src/h5perf/sio_engine.c
+++ b/tools/src/h5perf/sio_engine.c
@@ -328,7 +328,7 @@ sio_create_filename(iotype iot, const char *base_name, char *fullname, size_t si
/* If the prefix specifies the HDF5_PREFIX directory, then
* default to using the "/tmp/$USER" or "/tmp/$LOGIN"
* directory instead. */
- register char *user, *login, *subdir;
+ char *user, *login, *subdir;
user = HDgetenv("USER");
login = HDgetenv("LOGIN");
@@ -1267,7 +1267,7 @@ done:
static void
do_cleanupfile(iotype iot, char *filename)
{
- char temp[2048];
+ char temp[4096 + sizeof("-?.h5")];
int j;
hid_t driver;
diff --git a/tools/src/h5perf/sio_perf.c b/tools/src/h5perf/sio_perf.c
index 0eb4508..dc2e15e 100644
--- a/tools/src/h5perf/sio_perf.c
+++ b/tools/src/h5perf/sio_perf.c
@@ -164,7 +164,7 @@ typedef struct {
/* local functions */
static hsize_t parse_size_directive(const char *size);
-static struct options *parse_command_line(int argc, const char *argv[]);
+static struct options *parse_command_line(int argc, const char *const *argv);
static void run_test_loop(struct options *options);
static int run_test(iotype iot, parameters parms, struct options *opts);
static void output_all_info(minmax *mm, int count, int indent_level);
@@ -173,7 +173,7 @@ static void accumulate_minmax_stuff(const minmax *mm, int count, minm
static void output_results(const struct options *options, const char *name, minmax *table, int table_size,
off_t data_size);
static void output_report(const char *fmt, ...);
-static void print_indent(register int indent);
+static void print_indent(int indent);
static void usage(const char *prog);
static void report_parameters(struct options *opts);
@@ -185,7 +185,7 @@ static void report_parameters(struct options *opts);
* Modifications:
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int exit_value = EXIT_SUCCESS;
struct options *opts = NULL;
@@ -197,7 +197,7 @@ main(int argc, const char *argv[])
output = stdout;
- opts = parse_command_line(argc, argv);
+ opts = parse_command_line(argc, (const char *const *)argv);
if (!opts) {
exit_value = EXIT_FAILURE;
@@ -277,7 +277,7 @@ run_test_loop(struct options *opts)
}
/* print size information */
- output_report("Transfer Buffer Size (bytes): %d\n", buf_bytes);
+ output_report("Transfer Buffer Size (bytes): %zu\n", buf_bytes);
output_report("File Size(MB): %.2f\n", ((double)parms.num_bytes) / ONE_MB);
print_indent(0);
@@ -299,25 +299,25 @@ run_test_loop(struct options *opts)
static int
run_test(iotype iot, parameters parms, struct options *opts)
{
- results res;
- register int i, ret_value = SUCCESS;
- off_t raw_size;
- minmax * write_sys_mm_table = NULL;
- minmax * write_mm_table = NULL;
- minmax * write_gross_mm_table = NULL;
- minmax * write_raw_mm_table = NULL;
- minmax * read_sys_mm_table = NULL;
- minmax * read_mm_table = NULL;
- minmax * read_gross_mm_table = NULL;
- minmax * read_raw_mm_table = NULL;
- minmax write_sys_mm = {0.0, 0.0, 0.0, 0};
- minmax write_mm = {0.0, 0.0, 0.0, 0};
- minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
- minmax write_raw_mm = {0.0, 0.0, 0.0, 0};
- minmax read_sys_mm = {0.0, 0.0, 0.0, 0};
- minmax read_mm = {0.0, 0.0, 0.0, 0};
- minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
- minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
+ results res;
+ int i, ret_value = SUCCESS;
+ off_t raw_size;
+ minmax *write_sys_mm_table = NULL;
+ minmax *write_mm_table = NULL;
+ minmax *write_gross_mm_table = NULL;
+ minmax *write_raw_mm_table = NULL;
+ minmax *read_sys_mm_table = NULL;
+ minmax *read_mm_table = NULL;
+ minmax *read_gross_mm_table = NULL;
+ minmax *read_raw_mm_table = NULL;
+ minmax write_sys_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_gross_mm = {0.0, 0.0, 0.0, 0};
+ minmax write_raw_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_sys_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_gross_mm = {0.0, 0.0, 0.0, 0};
+ minmax read_raw_mm = {0.0, 0.0, 0.0, 0};
raw_size = (off_t)parms.num_bytes;
parms.io_type = iot;
@@ -657,10 +657,9 @@ output_report(const char *fmt, ...)
* things.
* Return: Nothing
* Programmer: Bill Wendling, 29. October 2001
- * Modifications:
*/
static void
-print_indent(register int indent)
+print_indent(int indent)
{
indent *= TAB_SPACE;
@@ -817,7 +816,7 @@ report_parameters(struct options *opts)
* Added multidimensional testing (Christian Chilan, April, 2008)
*/
static struct options *
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt;
struct options *cl_opts;
diff --git a/tools/src/h5repack/PARALLEL_REPACK_readme.txt b/tools/src/h5repack/PARALLEL_REPACK_readme.txt
index a8e703e..0b9783a 100644
--- a/tools/src/h5repack/PARALLEL_REPACK_readme.txt
+++ b/tools/src/h5repack/PARALLEL_REPACK_readme.txt
@@ -64,7 +64,7 @@ of a parallel version of repack. A quick summary of this is discussion follows:
Q5. Can you collectively copy data using hyperslab selections
rather than having each MPI rank copy the data from the
- input dataset to the ouput dataset?
+ input dataset to the output dataset?
A5. Probably yes. I can look at implementing this improvement
in the current design.
diff --git a/tools/src/h5repack/create_h5file.c b/tools/src/h5repack/create_h5file.c
index 5efa325..910a2d4 100644
--- a/tools/src/h5repack/create_h5file.c
+++ b/tools/src/h5repack/create_h5file.c
@@ -207,13 +207,13 @@ create_attributes(hid_t group_id)
}
if (H5Aclose(attr_id) < 0) {
- fprintf(stderr, "failed to close the attribure\n");
+ fprintf(stderr, "failed to close the attribute\n");
goto error;
}
}
if (H5Sclose(attr_space) < 0) {
- fprintf(stderr, "failed to close the attribure space\n");
+ fprintf(stderr, "failed to close the attribute space\n");
goto error;
}
diff --git a/tools/src/h5repack/h5prepack_main.c b/tools/src/h5repack/h5prepack_main.c
index 78f174c..56754a4 100644
--- a/tools/src/h5repack/h5prepack_main.c
+++ b/tools/src/h5repack/h5prepack_main.c
@@ -40,13 +40,13 @@ static struct h5_long_options l_opts[] = {{"alignment", require_arg, 'a'},
{"file", require_arg, 'e'},
{"filter", require_arg, 'f'},
{"help", no_arg, 'h'},
- {"infile", require_arg, 'i'}, /* for backward compability */
+ {"infile", require_arg, 'i'}, /* for backward compatibility */
{"low", require_arg, 'j'},
{"high", require_arg, 'k'},
{"layout", require_arg, 'l'},
{"minimum", require_arg, 'm'},
{"native", no_arg, 'n'},
- {"outfile", require_arg, 'o'}, /* for backward compability */
+ {"outfile", require_arg, 'o'}, /* for backward compatibility */
{"sort_by", require_arg, 'q'},
{"ssize", require_arg, 's'},
{"threshold", require_arg, 't'},
@@ -380,7 +380,7 @@ read_info(const char *filename, pack_opt_t *options)
goto done;
}
- /* find begining of info */
+ /* find beginning of info */
i = 0;
c = '0';
while (c != ' ') {
diff --git a/tools/src/h5repack/h5repack_copy.c b/tools/src/h5repack/h5repack_copy.c
index a938fd6..60b3967 100644
--- a/tools/src/h5repack/h5repack_copy.c
+++ b/tools/src/h5repack/h5repack_copy.c
@@ -894,7 +894,7 @@ select_objs_by_rank(hid_t fidin, trav_table_t *orig, int **table_index)
for (k = 0; k < (int)orig->nobjs; k++) {
int mod_rank = (int)((int)k % g_nTasks);
- /* For debugging only, make it alwasy true - Ray */
+ /* For debugging only, make it always true - Ray */
// orig->objs[k].use_hyperslab = true;
if ((orig->objs[k].use_hyperslab) || (mod_rank == (int)g_nID)) {
@@ -1633,7 +1633,7 @@ pcopy_objects(hid_t fidin, hid_t fidout, trav_table_t *travt, int *obj_index, in
// read_time = 0.0;
write_time = 0.0;
- /* Potentially override the default size where we will transistion
+ /* Potentially override the default size where we will transition
* to using hyperslab selections to divide the work between mpi ranks.
*/
diff --git a/tools/src/h5repack/h5repack_main.c b/tools/src/h5repack/h5repack_main.c
index 0a26500..a805b3d 100644
--- a/tools/src/h5repack/h5repack_main.c
+++ b/tools/src/h5repack/h5repack_main.c
@@ -18,7 +18,7 @@
/* Name of tool */
#define PROGRAMNAME "h5repack"
-static int parse_command_line(int argc, const char **argv, pack_opt_t *options);
+static int parse_command_line(int argc, const char *const *argv, pack_opt_t *options);
static void leave(int ret) H5_ATTR_NORETURN;
/* module-scoped variables */
@@ -31,7 +31,7 @@ const char *outfile = NULL;
* Command-line options: The user can specify short or long-named
* parameters.
*/
-static const char *s_opts = "a:b:c:d:e:f:hi:j:k:l:m:no:q:s:t:u:v*z:EG:LM:P:S:T:VXWY:Z:1:2:3:4:5:6:7:8:9:0:";
+static const char *s_opts = "a:b:c:d:e:f:hi:j:k:l:m:no:q:s:t:u:v*z:E*G:LM:P:S:T:VXWY:Z:1:2:3:4:5:6:7:8:9:0:";
static struct h5_long_options l_opts[] = {{"alignment", require_arg, 'a'},
{"block", require_arg, 'b'},
{"compact", require_arg, 'c'},
@@ -52,7 +52,7 @@ static struct h5_long_options l_opts[] = {{"alignment", require_arg, 'a'},
{"ublock", require_arg, 'u'},
{"verbose", optional_arg, 'v'},
{"sort_order", require_arg, 'z'},
- {"enable-error-stack", no_arg, 'E'},
+ {"enable-error-stack", optional_arg, 'E'},
{"fs_pagesize", require_arg, 'G'},
{"latest", no_arg, 'L'},
{"metadata_block_size", require_arg, 'M'},
@@ -92,6 +92,11 @@ usage(const char *prog)
PRINTSTREAM(rawoutstream, "usage: %s [OPTIONS] file1 file2\n", prog);
PRINTVALSTREAM(rawoutstream, " file1 Input HDF5 File\n");
PRINTVALSTREAM(rawoutstream, " file2 Output HDF5 File\n");
+ PRINTVALSTREAM(rawoutstream, " ERROR\n");
+ PRINTVALSTREAM(rawoutstream,
+ " --enable-error-stack Prints messages from the HDF5 error stack as they occur.\n");
+ PRINTVALSTREAM(rawoutstream,
+ " Optional value 2 also prints file open errors.\n");
PRINTVALSTREAM(rawoutstream, " OPTIONS\n");
PRINTVALSTREAM(rawoutstream, " -h, --help Print a usage message and exit\n");
PRINTVALSTREAM(rawoutstream, " -v N, --verbose=N Verbose mode, print object information.\n");
@@ -99,9 +104,6 @@ usage(const char *prog)
PRINTVALSTREAM(rawoutstream, " -V, --version Print version number and exit\n");
PRINTVALSTREAM(rawoutstream, " -n, --native Use a native HDF5 type when repacking\n");
PRINTVALSTREAM(rawoutstream,
- " --enable-error-stack Prints messages from the HDF5 error stack as they\n");
- PRINTVALSTREAM(rawoutstream, " occur\n");
- PRINTVALSTREAM(rawoutstream,
" --src-vol-value Value (ID) of the VOL connector to use for opening the\n");
PRINTVALSTREAM(rawoutstream, " input HDF5 file specified\n");
PRINTVALSTREAM(rawoutstream,
@@ -507,7 +509,7 @@ set_sort_order(const char *form)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char **argv, pack_opt_t *options)
+parse_command_line(int argc, const char *const *argv, pack_opt_t *options)
{
h5tools_vol_info_t in_vol_info;
h5tools_vol_info_t out_vol_info;
@@ -701,7 +703,7 @@ parse_command_line(int argc, const char **argv, pack_opt_t *options)
case 'a':
options->alignment = HDstrtoull(H5_optarg, NULL, 0);
if (options->alignment < 1) {
- error_msg("invalid alignment size\n", H5_optarg);
+ error_msg("invalid alignment size `%s`\n", H5_optarg);
h5tools_setstatus(EXIT_FAILURE);
ret_value = -1;
goto done;
@@ -721,7 +723,7 @@ parse_command_line(int argc, const char **argv, pack_opt_t *options)
else if (!HDstrcmp(strategy, "NONE"))
options->fs_strategy = H5F_FSPACE_STRATEGY_NONE;
else {
- error_msg("invalid file space management strategy\n", H5_optarg);
+ error_msg("invalid file space management strategy `%s`\n", H5_optarg);
h5tools_setstatus(EXIT_FAILURE);
ret_value = -1;
goto done;
@@ -771,7 +773,10 @@ parse_command_line(int argc, const char **argv, pack_opt_t *options)
break;
case 'E':
- enable_error_stack = 1;
+ if (H5_optarg != NULL)
+ enable_error_stack = HDatoi(H5_optarg);
+ else
+ enable_error_stack = 1;
break;
case '1':
@@ -928,7 +933,7 @@ done:
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char **argv)
+main(int argc, char **argv)
{
pack_opt_t options; /*the global options */
int parse_ret;
@@ -958,7 +963,7 @@ main(int argc, const char **argv)
/* Initialize default indexing options */
sort_by = H5_INDEX_CRT_ORDER;
- parse_ret = parse_command_line(argc, argv, &options);
+ parse_ret = parse_command_line(argc, (const char *const *)argv, &options);
if (parse_ret < 0) {
HDprintf("Error occurred while parsing command-line options\n");
h5tools_setstatus(EXIT_FAILURE);
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index 6b01e49..e42af10 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -169,7 +169,7 @@ struct handler_t {
char **obj;
};
-static const char *s_opts = "Aa:Ddm:EFfhGgl:sSTO:Vw:H:";
+static const char *s_opts = "Aa:Ddm:E*FfhGgl:sSTO:Vw:H:";
/* e.g. "filemetadata" has to precede "file"; "groupmetadata" has to precede "group" etc. */
static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'},
{"filemetadata", no_arg, 'F'},
@@ -181,7 +181,7 @@ static struct h5_long_options l_opts[] = {{"help", no_arg, 'h'},
{"object", require_arg, 'O'},
{"version", no_arg, 'V'},
{"attribute", no_arg, 'A'},
- {"enable-error-stack", no_arg, 'E'},
+ {"enable-error-stack", optional_arg, 'E'},
{"numattrs", require_arg, 'a'},
{"freespace", no_arg, 's'},
{"summary", no_arg, 'S'},
@@ -211,6 +211,9 @@ usage(const char *prog)
HDfflush(stdout);
HDfprintf(stdout, "Usage: %s [OPTIONS] file\n", prog);
HDfprintf(stdout, "\n");
+ HDfprintf(stdout, " ERROR\n");
+ HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n");
+ HDfprintf(stdout, " Optional value 2 also prints file open errors\n");
HDfprintf(stdout, " OPTIONS\n");
HDfprintf(stdout, " -h, --help Print a usage message and exit\n");
HDfprintf(stdout, " -V, --version Print version number and exit\n");
@@ -235,7 +238,6 @@ usage(const char *prog)
HDfprintf(stdout, " than 0. The default threshold is 10.\n");
HDfprintf(stdout, " -s, --freespace Print free space information\n");
HDfprintf(stdout, " -S, --summary Print summary of file space information\n");
- HDfprintf(stdout, " --enable-error-stack Prints messages from the HDF5 error stack as they occur\n");
HDfprintf(stdout, " --s3-cred=<cred> Access file on S3, using provided credential\n");
HDfprintf(stdout, " <cred> :: (region,id,key)\n");
HDfprintf(stdout, " If <cred> == \"(,,)\", no authentication is used.\n");
@@ -830,7 +832,7 @@ hand_free(struct handler_t *hand)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
+parse_command_line(int argc, const char *const *argv, struct handler_t **hand_ret)
{
int opt;
unsigned u;
@@ -852,7 +854,10 @@ parse_command_line(int argc, const char *argv[], struct handler_t **hand_ret)
break;
case 'E':
- enable_error_stack = 1;
+ if (H5_optarg != NULL)
+ enable_error_stack = HDatoi(H5_optarg);
+ else
+ enable_error_stack = 1;
break;
case 'F':
@@ -1677,7 +1682,7 @@ print_statistics(const char *name, const iter_t *iter)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
iter_t iter;
const char * fname = NULL;
@@ -1693,7 +1698,7 @@ main(int argc, const char *argv[])
HDmemset(&iter, 0, sizeof(iter));
- if (parse_command_line(argc, argv, &hand) < 0)
+ if (parse_command_line(argc, (const char *const *)argv, &hand) < 0)
goto done;
/* enable error reporting if command line option */
@@ -1736,7 +1741,7 @@ main(int argc, const char *argv[])
warn_msg("Unable to retrieve file size\n");
HDassert(iter.filesize != 0);
- /* Get storge info for file-level structures */
+ /* Get storage info for file-level structures */
if (H5Fget_info2(fid, &finfo) < 0)
warn_msg("Unable to retrieve file info\n");
else {
diff --git a/tools/src/misc/h5clear.c b/tools/src/misc/h5clear.c
index face2f0..15c170d 100644
--- a/tools/src/misc/h5clear.c
+++ b/tools/src/misc/h5clear.c
@@ -109,7 +109,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char **argv)
+parse_command_line(int argc, const char *const *argv)
{
int opt;
@@ -224,7 +224,7 @@ leave(int ret)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
char * fname = NULL; /* File name */
hid_t fapl = H5I_INVALID_HID; /* File access property list */
@@ -240,7 +240,7 @@ main(int argc, const char *argv[])
h5tools_init();
/* Parse command line options */
- if (parse_command_line(argc, argv) < 0)
+ if (parse_command_line(argc, (const char *const *)argv) < 0)
goto done;
if (fname_g == NULL)
diff --git a/tools/src/misc/h5delete.c b/tools/src/misc/h5delete.c
index 3c4f8d5..20e81a1 100644
--- a/tools/src/misc/h5delete.c
+++ b/tools/src/misc/h5delete.c
@@ -29,7 +29,7 @@ usage(void)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hbool_t quiet = FALSE;
const char *name = NULL;
diff --git a/tools/src/misc/h5mkgrp.c b/tools/src/misc/h5mkgrp.c
index 1e66fce..1b8b23d 100644
--- a/tools/src/misc/h5mkgrp.c
+++ b/tools/src/misc/h5mkgrp.c
@@ -131,7 +131,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
static int
-parse_command_line(int argc, const char *argv[], mkgrp_opt_t *options)
+parse_command_line(int argc, const char *const *argv, mkgrp_opt_t *options)
{
int opt; /* Option from command line */
size_t curr_group; /* Current group name to copy */
@@ -281,7 +281,7 @@ parse_command_line(int argc, const char *argv[], mkgrp_opt_t *options)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
hid_t lcpl_id = H5I_INVALID_HID; /* Link creation property list ID */
@@ -303,7 +303,7 @@ main(int argc, const char *argv[])
}
/* Parse command line */
- if (parse_command_line(argc, argv, &params_g) < 0) {
+ if (parse_command_line(argc, (const char *const *)argv, &params_g) < 0) {
error_msg("unable to parse command line arguments\n");
leave(EXIT_FAILURE);
}
diff --git a/tools/test/h5copy/h5copygentest.c b/tools/test/h5copy/h5copygentest.c
index c1f8349..e415c97 100644
--- a/tools/test/h5copy/h5copygentest.c
+++ b/tools/test/h5copy/h5copygentest.c
@@ -923,7 +923,7 @@ out:
/*-------------------------------------------------------------------------
* Function: Test_Extlink_Copy
*
- * Purpose: gerenate external link files
+ * Purpose: generate external link files
*
*------------------------------------------------------------------------*/
static void
diff --git a/tools/test/h5diff/testfiles/h5diff_10.txt b/tools/test/h5diff/testfiles/h5diff_10.txt
index 853aede..b1d607e 100644
--- a/tools/test/h5diff/testfiles/h5diff_10.txt
+++ b/tools/test/h5diff/testfiles/h5diff_10.txt
@@ -4,6 +4,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -22,8 +25,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_600.txt b/tools/test/h5diff/testfiles/h5diff_600.txt
index 2893b78..5236964 100644
--- a/tools/test/h5diff/testfiles/h5diff_600.txt
+++ b/tools/test/h5diff/testfiles/h5diff_600.txt
@@ -4,6 +4,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -22,8 +25,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_603.txt b/tools/test/h5diff/testfiles/h5diff_603.txt
index 4317282..e7bad17 100644
--- a/tools/test/h5diff/testfiles/h5diff_603.txt
+++ b/tools/test/h5diff/testfiles/h5diff_603.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_606.txt b/tools/test/h5diff/testfiles/h5diff_606.txt
index c32142f..410528d 100644
--- a/tools/test/h5diff/testfiles/h5diff_606.txt
+++ b/tools/test/h5diff/testfiles/h5diff_606.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_612.txt b/tools/test/h5diff/testfiles/h5diff_612.txt
index a080f29..ffc191e 100644
--- a/tools/test/h5diff/testfiles/h5diff_612.txt
+++ b/tools/test/h5diff/testfiles/h5diff_612.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_615.txt b/tools/test/h5diff/testfiles/h5diff_615.txt
index a9bd76c..2ff50e9 100644
--- a/tools/test/h5diff/testfiles/h5diff_615.txt
+++ b/tools/test/h5diff/testfiles/h5diff_615.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_621.txt b/tools/test/h5diff/testfiles/h5diff_621.txt
index cf1af15..7db4959 100644
--- a/tools/test/h5diff/testfiles/h5diff_621.txt
+++ b/tools/test/h5diff/testfiles/h5diff_621.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_622.txt b/tools/test/h5diff/testfiles/h5diff_622.txt
index 3b325de..db77f88 100644
--- a/tools/test/h5diff/testfiles/h5diff_622.txt
+++ b/tools/test/h5diff/testfiles/h5diff_622.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_623.txt b/tools/test/h5diff/testfiles/h5diff_623.txt
index be34fb4..11739be 100644
--- a/tools/test/h5diff/testfiles/h5diff_623.txt
+++ b/tools/test/h5diff/testfiles/h5diff_623.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5diff/testfiles/h5diff_624.txt b/tools/test/h5diff/testfiles/h5diff_624.txt
index ebb76af..6af9fe3 100644
--- a/tools/test/h5diff/testfiles/h5diff_624.txt
+++ b/tools/test/h5diff/testfiles/h5diff_624.txt
@@ -5,6 +5,9 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
[obj1] Name of an HDF5 object, in absolute path
[obj2] Name of an HDF5 object, in absolute path
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help
Print a usage message and exit.
@@ -23,8 +26,6 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
3 : All level 2 information plus file names.
-q, --quiet
Quiet mode. Do not produce output.
- --enable-error-stack
- Prints messages from the HDF5 error stack as they occur.
--vol-value-1 Value (ID) of the VOL connector to use for opening the
first HDF5 file specified
--vol-name-1 Name of the VOL connector to use for opening the first
diff --git a/tools/test/h5dump/binread.c b/tools/test/h5dump/binread.c
index d6206ca..341a085 100644
--- a/tools/test/h5dump/binread.c
+++ b/tools/test/h5dump/binread.c
@@ -59,7 +59,7 @@ usage(void)
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
FILE * stream;
size_t numread;
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index 5258211..339b563 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -3914,9 +3914,9 @@ gent_multi(void)
static void
gent_large_objname(void)
{
- hid_t fid, group, group2;
- char grp_name[128];
- register int i;
+ hid_t fid, group, group2;
+ char grp_name[128];
+ int i;
fid = H5Fcreate(FILE37, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
diff --git a/tools/test/h5jam/getub.c b/tools/test/h5jam/getub.c
index fd21d6c..4e67e98 100644
--- a/tools/test/h5jam/getub.c
+++ b/tools/test/h5jam/getub.c
@@ -15,7 +15,7 @@
#include "h5tools.h"
#include "h5tools_utils.h"
-void parse_command_line(int argc, const char *argv[]);
+void parse_command_line(int argc, const char *const *argv);
/* Name of tool */
#define PROGRAM_NAME "getub"
@@ -52,7 +52,7 @@ usage(const char *prog)
*-------------------------------------------------------------------------
*/
void
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt;
@@ -77,7 +77,7 @@ parse_command_line(int argc, const char *argv[])
} /* end parse_command_line() */
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int fd = H5I_INVALID_HID;
unsigned size;
@@ -91,7 +91,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- parse_command_line(argc, argv);
+ parse_command_line(argc, (const char *const *)argv);
if (NULL == nbytes) {
/* missing arg */
diff --git a/tools/test/h5jam/tellub.c b/tools/test/h5jam/tellub.c
index e6769ec..f264d9b 100644
--- a/tools/test/h5jam/tellub.c
+++ b/tools/test/h5jam/tellub.c
@@ -56,7 +56,7 @@ usage(const char *prog)
*/
static void
-parse_command_line(int argc, const char *argv[])
+parse_command_line(int argc, const char *const *argv)
{
int opt;
@@ -98,7 +98,7 @@ leave(int ret)
*-------------------------------------------------------------------------
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
char * ifname;
hid_t ifile = H5I_INVALID_HID;
@@ -113,7 +113,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- parse_command_line(argc, argv);
+ parse_command_line(argc, (const char *const *)argv);
/* enable error reporting if command line option */
h5tools_error_report();
diff --git a/tools/test/h5repack/h5repackgentest.c b/tools/test/h5repack/h5repackgentest.c
index b026cc0..c025891 100644
--- a/tools/test/h5repack/h5repackgentest.c
+++ b/tools/test/h5repack/h5repackgentest.c
@@ -307,7 +307,7 @@ generate_f32le(hbool_t external)
/* Generate values */
for (i = 0, k = 0, n = 0; (hsize_t)i < dims[0]; i++) {
for (j = 0; (hsize_t)j < dims[1]; j++, k++, n++) {
- wdata[k] = n * 801.1f * ((k % 5 == 1) ? (-1) : (1));
+ wdata[k] = n * 801.1F * ((k % 5 == 1) ? (-1) : (1));
}
}
diff --git a/tools/test/h5repack/testfiles/h5repack-help.txt b/tools/test/h5repack/testfiles/h5repack-help.txt
index c1caf52..bff70af 100644
--- a/tools/test/h5repack/testfiles/h5repack-help.txt
+++ b/tools/test/h5repack/testfiles/h5repack-help.txt
@@ -1,14 +1,15 @@
usage: h5repack [OPTIONS] file1 file2
file1 Input HDF5 File
file2 Output HDF5 File
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
OPTIONS
-h, --help Print a usage message and exit
-v N, --verbose=N Verbose mode, print object information.
N - is an integer greater than 1, 2 displays read/write timing
-V, --version Print version number and exit
-n, --native Use a native HDF5 type when repacking
- --enable-error-stack Prints messages from the HDF5 error stack as they
- occur
--src-vol-value Value (ID) of the VOL connector to use for opening the
input HDF5 file specified
--src-vol-name Name of the VOL connector to use for opening the input
diff --git a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
index 15ae813..0992a0c 100644
--- a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
+++ b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
@@ -11,7 +11,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -33,7 +33,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -55,7 +55,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -77,7 +77,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -99,7 +99,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -121,7 +121,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
@@ -143,7 +143,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 13 1 }
+ PARAMS { 9 1 13 2 }
}
}
FILLVALUE {
diff --git a/tools/test/h5stat/testfiles/h5stat_help1.ddl b/tools/test/h5stat/testfiles/h5stat_help1.ddl
index 2ba7772..1f65f0d 100644
--- a/tools/test/h5stat/testfiles/h5stat_help1.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help1.ddl
@@ -1,5 +1,8 @@
Usage: h5stat [OPTIONS] file
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
+ Optional value 2 also prints file open errors
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
@@ -22,7 +25,6 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
- --enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
diff --git a/tools/test/h5stat/testfiles/h5stat_help2.ddl b/tools/test/h5stat/testfiles/h5stat_help2.ddl
index 2ba7772..1f65f0d 100644
--- a/tools/test/h5stat/testfiles/h5stat_help2.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_help2.ddl
@@ -1,5 +1,8 @@
Usage: h5stat [OPTIONS] file
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
+ Optional value 2 also prints file open errors
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
@@ -22,7 +25,6 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
- --enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
diff --git a/tools/test/h5stat/testfiles/h5stat_nofile.ddl b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
index 2ba7772..1f65f0d 100644
--- a/tools/test/h5stat/testfiles/h5stat_nofile.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_nofile.ddl
@@ -1,5 +1,8 @@
Usage: h5stat [OPTIONS] file
+ ERROR
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur
+ Optional value 2 also prints file open errors
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
@@ -22,7 +25,6 @@ Usage: h5stat [OPTIONS] file
than 0. The default threshold is 10.
-s, --freespace Print free space information
-S, --summary Print summary of file space information
- --enable-error-stack Prints messages from the HDF5 error stack as they occur
--s3-cred=<cred> Access file on S3, using provided credential
<cred> :: (region,id,key)
If <cred> == "(,,)", no authentication is used.
diff --git a/tools/test/misc/h5clear_gentest.c b/tools/test/misc/h5clear_gentest.c
index 225fe16..97feabd 100644
--- a/tools/test/misc/h5clear_gentest.c
+++ b/tools/test/misc/h5clear_gentest.c
@@ -428,7 +428,7 @@ main(void)
if ((my_fapl = H5Pcopy(fapl2)) < 0)
goto error;
/* Create the file */
- HDsprintf(fname, "%s%s", new_format ? "latest_" : "", FILENAME[0]);
+ HDsnprintf(fname, sizeof(fname), "%s%s", new_format ? "latest_" : "", FILENAME[0]);
if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT,
my_fapl)) < 0)
goto error;
@@ -453,7 +453,7 @@ main(void)
goto error;
/* Create the file */
- HDsprintf(fname, "%s%s", new_format ? "latest_" : "", FILENAME[1]);
+ HDsnprintf(fname, sizeof(fname), "%s%s", new_format ? "latest_" : "", FILENAME[1]);
if ((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT,
my_fapl)) < 0)
goto error;
diff --git a/tools/test/perform/direct_write_perf.c b/tools/test/perform/direct_write_perf.c
index 2076cf4..1d7756b 100644
--- a/tools/test/perform/direct_write_perf.c
+++ b/tools/test/perform/direct_write_perf.c
@@ -655,7 +655,7 @@ main(void)
hid_t fapl = H5P_DEFAULT;
int i;
- sprintf(filename, "%s.h5", FILENAME[0]);
+ snprintf(filename, sizeof(filename), "%s.h5", FILENAME[0]);
create_file(fapl);
test_direct_write_uncompressed_data(fapl);
diff --git a/tools/test/perform/perf_meta.c b/tools/test/perform/perf_meta.c
index d8ed9ec..ad76711 100644
--- a/tools/test/perform/perf_meta.c
+++ b/tools/test/perform/perf_meta.c
@@ -304,7 +304,7 @@ create_dsets(hid_t file)
* Create a dataset using the default dataset creation properties.
*/
for (i = 0; i < NUM_DSETS; i++) {
- HDsprintf(dset_name, "dataset %d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "dataset %d", i);
if ((dataset = H5Dcreate2(file, dset_name, H5T_NATIVE_DOUBLE, space, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT)) < 0)
goto error;
@@ -366,14 +366,14 @@ create_attrs_1(void)
* Create all(user specifies the number) attributes for each dataset
*/
for (i = 0; i < NUM_DSETS; i++) {
- HDsprintf(dset_name, "dataset %d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "dataset %d", i);
open_t.start = retrieve_time();
if ((dataset = H5Dopen2(file, dset_name, H5P_DEFAULT)) < 0)
goto error;
perf(&open_t, open_t.start, retrieve_time());
for (j = 0; j < NUM_ATTRS; j++) {
- HDsprintf(attr_name, "all attrs for each dset %d", j);
+ HDsnprintf(attr_name, sizeof(attr_name), "all attrs for each dset %d", j);
attr_t.start = retrieve_time();
if ((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE, small_space, H5P_DEFAULT,
H5P_DEFAULT)) < 0)
@@ -468,7 +468,7 @@ create_attrs_2(void)
* Create all(user specifies the number) attributes for each new dataset
*/
for (i = 0; i < NUM_DSETS; i++) {
- HDsprintf(dset_name, "dataset %d", i);
+ HDsnprintf(dset_name, sizeof(dset_name), "dataset %d", i);
create_t.start = retrieve_time();
if ((dataset = H5Dcreate2(file, dset_name, H5T_NATIVE_DOUBLE, space, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT)) < 0)
@@ -476,7 +476,7 @@ create_attrs_2(void)
perf(&create_t, create_t.start, retrieve_time());
for (j = 0; j < NUM_ATTRS; j++) {
- HDsprintf(attr_name, "all attrs for each dset %d", j);
+ HDsnprintf(attr_name, sizeof(attr_name), "all attrs for each dset %d", j);
attr_t.start = retrieve_time();
if ((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE, small_space, H5P_DEFAULT,
H5P_DEFAULT)) < 0)
@@ -579,14 +579,14 @@ create_attrs_3(void)
for (i = 0; i < loop_num; i++) {
for (j = 0; j < NUM_DSETS; j++) {
- HDsprintf(dset_name, "dataset %d", j);
+ HDsnprintf(dset_name, sizeof(dset_name), "dataset %d", j);
open_t.start = retrieve_time();
if ((dataset = H5Dopen2(file, dset_name, H5P_DEFAULT)) < 0)
goto error;
perf(&open_t, open_t.start, retrieve_time());
for (k = 0; k < BATCH_ATTRS; k++) {
- HDsprintf(attr_name, "some attrs for each dset %d %d", i, k);
+ HDsnprintf(attr_name, sizeof(attr_name), "some attrs for each dset %d %d", i, k);
attr_t.start = retrieve_time();
if ((attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_DOUBLE, small_space, H5P_DEFAULT,
H5P_DEFAULT)) < 0)
diff --git a/tools/test/perform/pio_standalone.c b/tools/test/perform/pio_standalone.c
index 032bfba..a074d2c 100644
--- a/tools/test/perform/pio_standalone.c
+++ b/tools/test/perform/pio_standalone.c
@@ -92,7 +92,7 @@ get_option(int argc, const char **argv, const char *opts, const struct h5_long_o
sp = 1;
}
else {
- register char *cp; /* pointer into current token */
+ char *cp; /* pointer into current token */
/* short command line option */
opt_opt = argv[H5_optind][sp];
diff --git a/tools/test/perform/pio_standalone.h b/tools/test/perform/pio_standalone.h
index f1fb946..e64a765 100644
--- a/tools/test/perform/pio_standalone.h
+++ b/tools/test/perform/pio_standalone.h
@@ -461,7 +461,8 @@ struct h5_long_options {
*/
};
-extern int H5_get_option(int argc, const char **argv, const char *opt, const struct h5_long_options *l_opt);
+extern int H5_get_option(int argc, const char *const *argv, const char *opt,
+ const struct h5_long_options *l_opt);
extern int nCols; /*max number of columns for outputting */
diff --git a/tools/test/perform/sio_standalone.c b/tools/test/perform/sio_standalone.c
index 7c22b53..353dad3 100644
--- a/tools/test/perform/sio_standalone.c
+++ b/tools/test/perform/sio_standalone.c
@@ -92,7 +92,7 @@ get_option(int argc, const char **argv, const char *opts, const struct h5_long_o
sp = 1;
}
else {
- register char *cp; /* pointer into current token */
+ char *cp; /* pointer into current token */
/* short command line option */
opt_opt = argv[H5_optind][sp];
diff --git a/tools/test/perform/sio_standalone.h b/tools/test/perform/sio_standalone.h
index 99cca75..57180e5 100644
--- a/tools/test/perform/sio_standalone.h
+++ b/tools/test/perform/sio_standalone.h
@@ -476,7 +476,8 @@ struct h5_long_options {
*/
};
-extern int H5_get_option(int argc, const char **argv, const char *opt, const struct h5_long_options *l_opt);
+extern int H5_get_option(int argc, const char *const *argv, const char *opt,
+ const struct h5_long_options *l_opt);
extern int nCols; /*max number of columns for outputting */
diff --git a/tools/test/perform/zip_perf.c b/tools/test/perform/zip_perf.c
index 37e69e2..87a7122 100644
--- a/tools/test/perform/zip_perf.c
+++ b/tools/test/perform/zip_perf.c
@@ -336,8 +336,8 @@ parse_size_directive(const char *size)
static void
fill_with_random_data(Bytef *src, uLongf src_len)
{
- register unsigned u;
- h5_stat_t stat_buf;
+ unsigned u;
+ h5_stat_t stat_buf;
if (HDstat("/dev/urandom", &stat_buf) == 0) {
uLongf len = src_len;
@@ -386,7 +386,7 @@ do_write_test(unsigned long file_size, unsigned long min_buf_size, unsigned long
Bytef * src;
for (src_len = min_buf_size; src_len <= max_buf_size; src_len <<= 1) {
- register unsigned long i, iters;
+ unsigned long i, iters;
iters = file_size / src_len;
src = (Bytef *)HDcalloc(1, sizeof(Bytef) * src_len);
@@ -489,7 +489,7 @@ do_write_test(unsigned long file_size, unsigned long min_buf_size, unsigned long
* Modifications:
*/
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
unsigned long min_buf_size = 128 * ONE_KB, max_buf_size = ONE_MB;
unsigned long file_size = 64 * ONE_MB;
@@ -500,7 +500,7 @@ main(int argc, const char *argv[])
/* Initialize h5tools lib */
h5tools_init();
- while ((opt = H5_get_option(argc, argv, s_opts, l_opts)) > 0) {
+ while ((opt = H5_get_option(argc, (const char *const *)argv, s_opts, l_opts)) > 0) {
switch ((char)opt) {
case '0':
case '1':
diff --git a/tools/testfiles/h5dump-help.txt b/tools/testfiles/h5dump-help.txt
index 53c666b..e20df73 100644
--- a/tools/testfiles/h5dump-help.txt
+++ b/tools/testfiles/h5dump-help.txt
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
+++ b/tools/testfiles/pbits/tnofilename-with-packed-bits.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsIncomplete.ddl b/tools/testfiles/pbits/tpbitsIncomplete.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsIncomplete.ddl
+++ b/tools/testfiles/pbits/tpbitsIncomplete.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthExceeded.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsLengthPositive.ddl b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsLengthPositive.ddl
+++ b/tools/testfiles/pbits/tpbitsLengthPositive.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsMaxExceeded.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetExceeded.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
index 53c666b..e20df73 100644
--- a/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
+++ b/tools/testfiles/pbits/tpbitsOffsetNegative.ddl
@@ -2,6 +2,10 @@ usage: h5dump [OPTIONS] files
OPTIONS
-h, --help Print a usage message and exit
-V, --version Print version number and exit
+--------------- Error Options ---------------
+ --enable-error-stack Prints messages from the HDF5 error stack as they occur.
+ Optional value 2 also prints file open errors.
+ Default setting disables any error reporting.
--------------- File Options ---------------
-n, --contents Print a list of the file contents and exit
Optional value 1 also prints attributes.
@@ -66,8 +70,6 @@ usage: h5dump [OPTIONS] files
-m T, --format=T Set the floating point output format
-q Q, --sort_by=Q Sort groups and attributes by index Q
-z Z, --sort_order=Z Sort groups and attributes by order Z
- --enable-error-stack Prints messages from the HDF5 error stack as they occur.
- Optional value 2 also prints file open errors.
--no-compact-subset Disable compact form of subsetting and allow the use
of "[" in dataset names.
-w N, --width=N Set the number of columns of output. A value of 0 (zero)
diff --git a/utils/mirror_vfd/mirror_remote.c b/utils/mirror_vfd/mirror_remote.c
index f61580d..8bb9544 100644
--- a/utils/mirror_vfd/mirror_remote.c
+++ b/utils/mirror_vfd/mirror_remote.c
@@ -147,7 +147,7 @@ mirror_log_bytes(struct mirror_log_info *info, unsigned int level, size_t n_byte
* ----------------------------------------------------------------------------
*/
loginfo_t *
-mirror_log_init(char *path, char *prefix, unsigned int verbosity)
+mirror_log_init(char *path, const char *prefix, unsigned int verbosity)
{
loginfo_t *info = NULL;
diff --git a/utils/mirror_vfd/mirror_remote.h b/utils/mirror_vfd/mirror_remote.h
index 6f37b0b..9415f7f 100644
--- a/utils/mirror_vfd/mirror_remote.h
+++ b/utils/mirror_vfd/mirror_remote.h
@@ -42,7 +42,7 @@ typedef struct mirror_log_info {
void mirror_log(loginfo_t *info, unsigned int level, const char *format, ...);
void mirror_log_bytes(loginfo_t *info, unsigned int level, size_t n_bytes, const unsigned char *buf);
-loginfo_t *mirror_log_init(char *path, char *prefix, unsigned int verbosity);
+loginfo_t *mirror_log_init(char *path, const char *prefix, unsigned int verbosity);
int mirror_log_term(loginfo_t *loginfo);
herr_t run_writer(int socketfd, H5FD_mirror_xmit_open_t *xmit_open);
diff --git a/utils/mirror_vfd/mirror_server.c b/utils/mirror_vfd/mirror_server.c
index f6a8bfe..5381d95 100644
--- a/utils/mirror_vfd/mirror_server.c
+++ b/utils/mirror_vfd/mirror_server.c
@@ -94,14 +94,14 @@
* ---------------------------------------------------------------------------
*/
struct op_args {
- uint32_t magic;
- int help;
- int main_port;
- int verbosity;
- int log_prepend_serv;
- int log_prepend_type;
- char log_path[PATH_MAX + 1];
- char writer_log_path[PATH_MAX + 1];
+ uint32_t magic;
+ int help;
+ int main_port;
+ unsigned int verbosity;
+ int log_prepend_serv;
+ int log_prepend_type;
+ char log_path[PATH_MAX + 1];
+ char writer_log_path[PATH_MAX + 1];
};
/* ---------------------------------------------------------------------------
@@ -224,7 +224,7 @@ parse_args(int argc, char **argv, struct op_args *args_out)
} /* end if port */
else if (!HDstrncmp(argv[i], "--verbosity=", 12)) {
mirror_log(NULL, V_INFO, "parsing 'verbosity' (%s)", argv[i] + 12);
- args_out->verbosity = HDatoi(argv[i] + 12);
+ args_out->verbosity = (unsigned int)HDatoi(argv[i] + 12);
} /* end if verbosity */
else if (!HDstrncmp(argv[i], "--logpath=", 10)) {
mirror_log(NULL, V_INFO, "parsing 'logpath' (%s)", argv[i] + 10);
@@ -456,7 +456,7 @@ error:
* ---------------------------------------------------------------------------
*/
static void
-wait_for_child(int sig)
+wait_for_child(int H5_ATTR_UNUSED sig)
{
while (HDwaitpid(-1, NULL, WNOHANG) > 0)
;
@@ -476,7 +476,7 @@ handle_requests(struct server_run *run)
{
int connfd = -1; /**/
char mybuf[H5FD_MIRROR_XMIT_OPEN_SIZE]; /**/
- int ret; /* general-purpose error-checking */
+ ssize_t ret; /* general-purpose error-checking */
int pid; /* process ID of fork */
struct sigaction sa;
int ret_value = 0;
@@ -521,14 +521,13 @@ handle_requests(struct server_run *run)
/* Read handshake from port connection.
*/
- ret = (int)HDread(connfd, &mybuf, H5FD_MIRROR_XMIT_OPEN_SIZE);
- if (-1 == ret) {
+ if ((ret = HDread(connfd, &mybuf, H5FD_MIRROR_XMIT_OPEN_SIZE)) < 0) {
mirror_log(run->loginfo, V_ERR, "read:%d", ret);
goto error;
}
mirror_log(run->loginfo, V_INFO, "received %d bytes", ret);
mirror_log(run->loginfo, V_ALL, "```");
- mirror_log_bytes(run->loginfo, V_ALL, ret, (const unsigned char *)mybuf);
+ mirror_log_bytes(run->loginfo, V_ALL, (size_t)ret, (const unsigned char *)mybuf);
mirror_log(run->loginfo, V_ALL, "```");
/* Respond to handshake message.
diff --git a/utils/mirror_vfd/mirror_writer.c b/utils/mirror_vfd/mirror_writer.c
index d3f12de..5726db5 100644
--- a/utils/mirror_vfd/mirror_writer.c
+++ b/utils/mirror_vfd/mirror_writer.c
@@ -788,8 +788,7 @@ do_write(struct mirror_session *session, const unsigned char *xmit_buf)
*/
sum_bytes_written = 0;
do {
- nbytes_in_packet = HDread(session->sockfd, buf, H5FD_MIRROR_DATA_BUFFER_MAX);
- if (-1 == nbytes_in_packet) {
+ if ((nbytes_in_packet = HDread(session->sockfd, buf, H5FD_MIRROR_DATA_BUFFER_MAX)) < 0) {
mirror_log(session->loginfo, V_ERR, "can't read into databuffer");
reply_error(session, "can't read data buffer");
return -1;
@@ -798,7 +797,7 @@ do_write(struct mirror_session *session, const unsigned char *xmit_buf)
mirror_log(session->loginfo, V_INFO, "received %zd bytes", nbytes_in_packet);
if (HEXDUMP_WRITEDATA) {
mirror_log(session->loginfo, V_ALL, "DATA:\n```");
- mirror_log_bytes(session->loginfo, V_ALL, nbytes_in_packet, (const unsigned char *)buf);
+ mirror_log_bytes(session->loginfo, V_ALL, (size_t)nbytes_in_packet, (const unsigned char *)buf);
mirror_log(session->loginfo, V_ALL, "```");
}
@@ -859,8 +858,7 @@ receive_communique(struct mirror_session *session, struct sock_comm *comm)
mirror_log(session->loginfo, V_INFO, "ready to receive"); /* TODO */
- read_ret = HDread(session->sockfd, comm->raw, H5FD_MIRROR_XMIT_BUFFER_MAX);
- if (-1 == read_ret) {
+ if ((read_ret = HDread(session->sockfd, comm->raw, H5FD_MIRROR_XMIT_BUFFER_MAX)) < 0) {
mirror_log(session->loginfo, V_ERR, "read:%zd", read_ret);
goto error;
}
diff --git a/utils/tools/h5dwalk/h5dwalk.c b/utils/tools/h5dwalk/h5dwalk.c
index a1e66e0..5a22d75 100644
--- a/utils/tools/h5dwalk/h5dwalk.c
+++ b/utils/tools/h5dwalk/h5dwalk.c
@@ -1071,12 +1071,12 @@ run_command(int argc __attribute__((unused)), char **argv, char *cmdline, const
if ((log_instance > 0) || processing_inputfile) {
if (processing_inputfile)
log_instance = current_input_index;
- HDsprintf(logpath, "%s/%s_%s.log_%d", HDgetcwd(current_dir, sizeof(current_dir)), logbase,
- thisapp, log_instance);
+ HDsnprintf(logpath, sizeof(logpath), "%s/%s_%s.log_%d",
+ HDgetcwd(current_dir, sizeof(current_dir)), logbase, thisapp, log_instance);
}
else {
- HDsprintf(logpath, "%s/%s_%s.log", HDgetcwd(current_dir, sizeof(current_dir)), logbase,
- thisapp);
+ HDsnprintf(logpath, sizeof(logpath), "%s/%s_%s.log",
+ HDgetcwd(current_dir, sizeof(current_dir)), logbase, thisapp);
}
}
else {
@@ -1085,15 +1085,17 @@ run_command(int argc __attribute__((unused)), char **argv, char *cmdline, const
if (processing_inputfile)
log_instance = current_input_index;
if (txtlog[log_len - 1] == '/')
- HDsprintf(logpath, "%s%s_%s.log_%d", txtlog, logbase, thisapp, log_instance);
+ HDsnprintf(logpath, sizeof(logpath), "%s%s_%s.log_%d", txtlog, logbase, thisapp,
+ log_instance);
else
- HDsprintf(logpath, "%s/%s_%s.log_%d", txtlog, logbase, thisapp, log_instance);
+ HDsnprintf(logpath, sizeof(logpath), "%s/%s_%s.log_%d", txtlog, logbase, thisapp,
+ log_instance);
}
else {
if (txtlog[log_len - 1] == '/')
- HDsprintf(logpath, "%s%s_%s.log", txtlog, logbase, thisapp);
+ HDsnprintf(logpath, sizeof(logpath), "%s%s_%s.log", txtlog, logbase, thisapp);
else
- HDsprintf(logpath, "%s/%s_%s.log", txtlog, logbase, thisapp);
+ HDsnprintf(logpath, sizeof(logpath), "%s/%s_%s.log", txtlog, logbase, thisapp);
}
}
@@ -1204,7 +1206,7 @@ MFU_PRED_EXEC(mfu_flist flist, uint64_t idx, void *arg)
}
}
- HDsprintf(cmdline, "\n---------\nCommand:");
+ HDsnprintf(cmdline, sizeof(cmdline), "\n---------\nCommand:");
b_offset = strlen(cmdline);
for (k = 0; k < count; k++) {
HDsprintf(&cmdline[b_offset], " %s", argv[k]);
@@ -1242,7 +1244,7 @@ static void
add_executable(int argc, char **argv, char *cmdstring, int *f_index, int f_count __attribute__((unused)))
{
char cmdline[2048];
- HDsprintf(cmdline, "\n---------\nCommand: %s\n", cmdstring);
+ HDsnprintf(cmdline, sizeof(cmdline), "\n---------\nCommand: %s\n", cmdstring);
argv[argc] = NULL;
run_command(argc, argv, cmdline, argv[f_index[0]]);
return;
@@ -1322,7 +1324,7 @@ process_input_file(char *inputname, int myrank, int size)
}
int
-main(int argc, const char *argv[])
+main(int argc, char *argv[])
{
int i;
int rc = 0;
@@ -1352,7 +1354,7 @@ main(int argc, const char *argv[])
if (env_var) {
int enable = HDatoi(env_var);
if (enable) {
-
+
}
}
#endif
@@ -1392,7 +1394,7 @@ main(int argc, const char *argv[])
mfu_pred *pred_head = NULL;
while (!tool_selected) {
- opt = H5_get_option(argc, argv, s_opts, l_opts);
+ opt = H5_get_option(argc, (const char *const *)argv, s_opts, l_opts);
switch ((char)opt) {
default:
usage();